From 57e82388e5c6b8bc4a6faf5868ac209d05c98242 Mon Sep 17 00:00:00 2001 From: tinswzy Date: Thu, 28 Nov 2024 20:34:04 +0800 Subject: [PATCH] refine exists log print with ctx Signed-off-by: tinswzy --- internal/datacoord/channel_manager.go | 24 +-- internal/datacoord/index_meta.go | 24 +-- internal/datacoord/services.go | 10 +- internal/datanode/services.go | 16 +- .../distributed/datanode/client/client.go | 4 +- .../distributed/querynode/client/client.go | 4 +- internal/indexnode/task_scheduler.go | 4 +- internal/indexnode/task_stats.go | 30 ++-- internal/metastore/kv/datacoord/kv_catalog.go | 38 ++--- .../metastore/kv/querycoord/kv_catalog.go | 2 +- internal/metastore/kv/rootcoord/kv_catalog.go | 147 +++++++++--------- .../metastore/kv/rootcoord/kv_catalog_test.go | 6 +- internal/proxy/impl.go | 26 ++-- internal/proxy/msg_pack.go | 18 +-- internal/proxy/search_reduce_util.go | 6 +- internal/proxy/task.go | 40 ++--- internal/proxy/task_database.go | 4 +- internal/proxy/task_delete.go | 22 +-- internal/proxy/task_delete_streaming.go | 4 +- internal/proxy/task_flush_streaming.go | 6 +- internal/proxy/task_index.go | 36 ++--- internal/proxy/task_index_test.go | 62 ++++---- internal/proxy/task_insert.go | 6 +- internal/proxy/task_insert_streaming.go | 6 +- internal/proxy/task_policies.go | 4 +- internal/proxy/task_query.go | 4 +- internal/proxy/task_scheduler.go | 12 +- internal/proxy/task_search.go | 25 +-- internal/proxy/task_search_test.go | 4 +- internal/proxy/task_statistic.go | 14 +- internal/proxy/task_test.go | 8 +- internal/proxy/task_upsert.go | 2 +- internal/proxy/util.go | 16 +- internal/querycoordv2/handlers.go | 4 +- internal/querycoordv2/ops_services.go | 8 +- internal/querycoordv2/services.go | 15 +- internal/querycoordv2/task/scheduler.go | 34 ++-- internal/querynodev2/services.go | 14 +- internal/querynodev2/services_test.go | 2 +- internal/rootcoord/alter_collection_task.go | 6 +- internal/rootcoord/alter_database_task.go | 4 +- internal/rootcoord/broker.go | 4 +- internal/rootcoord/create_collection_task.go | 52 +++---- .../rootcoord/create_collection_task_test.go | 70 ++++----- internal/rootcoord/create_partition_task.go | 2 +- internal/rootcoord/dml_channels.go | 18 +-- internal/rootcoord/drop_collection_task.go | 2 +- internal/rootcoord/drop_partition_task.go | 2 +- internal/rootcoord/list_db_task.go | 2 +- internal/rootcoord/meta_table.go | 56 +++---- internal/rootcoord/meta_table_test.go | 6 +- internal/rootcoord/root_coord.go | 35 ++--- internal/rootcoord/show_collection_task.go | 2 +- internal/rootcoord/step.go | 4 + internal/rootcoord/step_executor.go | 4 +- internal/rootcoord/timeticksync.go | 2 +- internal/rootcoord/undo.go | 2 +- internal/util/sessionutil/session_util.go | 8 +- 58 files changed, 500 insertions(+), 492 deletions(-) diff --git a/internal/datacoord/channel_manager.go b/internal/datacoord/channel_manager.go index c144d35c62a74..28bab70ac18a2 100644 --- a/internal/datacoord/channel_manager.go +++ b/internal/datacoord/channel_manager.go @@ -166,7 +166,7 @@ func (m *ChannelManagerImpl) Startup(ctx context.Context, legacyNodes, allNodes } if m.balanceCheckLoop != nil { - log.Info("starting channel balance loop") + log.Ctx(ctx).Info("starting channel balance loop") m.wg.Add(1) go func() { defer m.wg.Done() @@ -174,7 +174,7 @@ func (m *ChannelManagerImpl) Startup(ctx context.Context, legacyNodes, allNodes }() } - log.Info("cluster start up", + log.Ctx(ctx).Info("cluster start up", zap.Int64s("allNodes", allNodes), zap.Int64s("legacyNodes", legacyNodes), 
zap.Int64s("oldNodes", oNodes), @@ -242,6 +242,7 @@ func (m *ChannelManagerImpl) Watch(ctx context.Context, ch RWChannel) error { log.Info("Add channel") updates := NewChannelOpSet(NewChannelOp(bufferID, Watch, ch)) + // TODO fill in traceID to channelOp's watchInfo err := m.execute(updates) if err != nil { log.Warn("fail to update new channel updates into meta", @@ -255,6 +256,7 @@ func (m *ChannelManagerImpl) Watch(ctx context.Context, ch RWChannel) error { return nil } + // TODO fill in traceID to channelOp's watchInfo if err := m.execute(updates); err != nil { log.Warn("fail to assign channel, will retry later", zap.Array("updates", updates), zap.Error(err)) return nil @@ -489,7 +491,7 @@ func (m *ChannelManagerImpl) finishRemoveChannel(nodeID int64, channels ...RWCha } } -func (m *ChannelManagerImpl) advanceStandbys(_ context.Context, standbys []*NodeChannelInfo) bool { +func (m *ChannelManagerImpl) advanceStandbys(ctx context.Context, standbys []*NodeChannelInfo) bool { var advanced bool = false for _, nodeAssign := range standbys { validChannels := make(map[string]RWChannel) @@ -516,7 +518,7 @@ func (m *ChannelManagerImpl) advanceStandbys(_ context.Context, standbys []*Node chNames := lo.Keys(validChannels) if err := m.reassign(nodeAssign); err != nil { - log.Warn("Reassign channels fail", + log.Ctx(ctx).Warn("Reassign channels fail", zap.Int64("nodeID", nodeAssign.NodeID), zap.Strings("channels", chNames), zap.Error(err), @@ -524,7 +526,7 @@ func (m *ChannelManagerImpl) advanceStandbys(_ context.Context, standbys []*Node continue } - log.Info("Reassign standby channels to node", + log.Ctx(ctx).Info("Reassign standby channels to node", zap.Int64("nodeID", nodeAssign.NodeID), zap.Strings("channels", chNames), ) @@ -550,7 +552,7 @@ func (m *ChannelManagerImpl) advanceToNotifies(ctx context.Context, toNotifies [ ) chNames := lo.Keys(nodeAssign.Channels) - log.Info("Notify channel operations to datanode", + log.Ctx(ctx).Info("Notify channel operations to datanode", zap.Int64("assignment", nodeAssign.NodeID), zap.Int("total operation count", len(nodeAssign.Channels)), zap.Strings("channel names", chNames), @@ -577,7 +579,7 @@ func (m *ChannelManagerImpl) advanceToNotifies(ctx context.Context, toNotifies [ } } - log.Info("Finish to notify channel operations to datanode", + log.Ctx(ctx).Info("Finish to notify channel operations to datanode", zap.Int64("assignment", nodeAssign.NodeID), zap.Int("operation count", channelCount), zap.Int("success count", len(succeededChannels)), @@ -608,7 +610,7 @@ func (m *ChannelManagerImpl) advanceToChecks(ctx context.Context, toChecks []*No futures := make([]*conc.Future[any], 0, len(nodeAssign.Channels)) chNames := lo.Keys(nodeAssign.Channels) - log.Info("Check ToWatch/ToRelease channel operations progress", + log.Ctx(ctx).Info("Check ToWatch/ToRelease channel operations progress", zap.Int("channel count", len(nodeAssign.Channels)), zap.Strings("channel names", chNames), ) @@ -641,7 +643,7 @@ func (m *ChannelManagerImpl) advanceToChecks(ctx context.Context, toChecks []*No } } - log.Info("Finish to Check ToWatch/ToRelease channel operations progress", + log.Ctx(ctx).Info("Finish to Check ToWatch/ToRelease channel operations progress", zap.Int("channel count", len(nodeAssign.Channels)), zap.Strings("channel names", chNames), ) @@ -650,7 +652,7 @@ func (m *ChannelManagerImpl) advanceToChecks(ctx context.Context, toChecks []*No } func (m *ChannelManagerImpl) Notify(ctx context.Context, nodeID int64, info *datapb.ChannelWatchInfo) error { - log := log.With( + 
log := log.Ctx(ctx).With( zap.String("channel", info.GetVchan().GetChannelName()), zap.Int64("assignment", nodeID), zap.String("operation", info.GetState().String()), @@ -666,7 +668,7 @@ func (m *ChannelManagerImpl) Notify(ctx context.Context, nodeID int64, info *dat } func (m *ChannelManagerImpl) Check(ctx context.Context, nodeID int64, info *datapb.ChannelWatchInfo) (successful bool, got bool) { - log := log.With( + log := log.Ctx(ctx).With( zap.Int64("opID", info.GetOpID()), zap.Int64("nodeID", nodeID), zap.String("check operation", info.GetState().String()), diff --git a/internal/datacoord/index_meta.go b/internal/datacoord/index_meta.go index ce66352d562cd..191d18ba26c8a 100644 --- a/internal/datacoord/index_meta.go +++ b/internal/datacoord/index_meta.go @@ -375,20 +375,20 @@ func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) } func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error { - log.Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID), + log.Ctx(ctx).Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID), zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName)) m.Lock() defer m.Unlock() if err := m.catalog.CreateIndex(ctx, index); err != nil { - log.Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID), + log.Ctx(ctx).Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID), zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName), zap.Error(err)) return err } m.updateCollectionIndex(index) - log.Info("meta update: CreateIndex success", zap.Int64("collectionID", index.CollectionID), + log.Ctx(ctx).Info("meta update: CreateIndex success", zap.Int64("collectionID", index.CollectionID), zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName)) return nil } @@ -415,19 +415,19 @@ func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.Segment defer m.Unlock() buildID := segIndex.BuildID - log.Info("meta update: adding segment index", zap.Int64("collectionID", segIndex.CollectionID), + log.Ctx(ctx).Info("meta update: adding segment index", zap.Int64("collectionID", segIndex.CollectionID), zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID), zap.Int64("buildID", buildID)) segIndex.IndexState = commonpb.IndexState_Unissued if err := m.catalog.CreateSegmentIndex(ctx, segIndex); err != nil { - log.Warn("meta update: adding segment index failed", + log.Ctx(ctx).Warn("meta update: adding segment index failed", zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID), zap.Int64("buildID", segIndex.BuildID), zap.Error(err)) return err } m.updateSegmentIndex(segIndex) - log.Info("meta update: adding segment index success", zap.Int64("collectionID", segIndex.CollectionID), + log.Ctx(ctx).Info("meta update: adding segment index success", zap.Int64("collectionID", segIndex.CollectionID), zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID), zap.Int64("buildID", buildID)) m.updateIndexTasksMetrics() @@ -562,7 +562,7 @@ func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) // MarkIndexAsDeleted will mark the corresponding index as deleted, and recycleUnusedIndexFiles will recycle these tasks. 
func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, indexIDs []UniqueID) error { - log.Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID), + log.Ctx(ctx).Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID), zap.Int64s("indexIDs", indexIDs)) m.Lock() @@ -587,14 +587,14 @@ func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, ind } err := m.catalog.AlterIndexes(ctx, indexes) if err != nil { - log.Error("failed to alter index meta in meta store", zap.Int("indexes num", len(indexes)), zap.Error(err)) + log.Ctx(ctx).Error("failed to alter index meta in meta store", zap.Int("indexes num", len(indexes)), zap.Error(err)) return err } for _, index := range indexes { m.indexes[index.CollectionID][index.IndexID] = index } - log.Info("IndexCoord metaTable MarkIndexAsDeleted success", zap.Int64("collectionID", collID), zap.Int64s("indexIDs", indexIDs)) + log.Ctx(ctx).Info("IndexCoord metaTable MarkIndexAsDeleted success", zap.Int64("collectionID", collID), zap.Int64s("indexIDs", indexIDs)) return nil } @@ -930,10 +930,10 @@ func (m *indexMeta) GetDeletedIndexes() []*model.Index { func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) error { m.Lock() defer m.Unlock() - log.Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID)) + log.Ctx(ctx).Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID)) err := m.catalog.DropIndex(ctx, collID, indexID) if err != nil { - log.Info("IndexCoord meta table remove index fail", zap.Int64("collectionID", collID), + log.Ctx(ctx).Info("IndexCoord meta table remove index fail", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID), zap.Error(err)) return err } @@ -946,7 +946,7 @@ func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) e metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.FinishedIndexTaskLabel}) metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.FailedIndexTaskLabel}) } - log.Info("IndexCoord meta table remove index success", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID)) + log.Ctx(ctx).Info("IndexCoord meta table remove index success", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID)) return nil } diff --git a/internal/datacoord/services.go b/internal/datacoord/services.go index dc19a9abab03c..8b8d042b2a65f 100644 --- a/internal/datacoord/services.go +++ b/internal/datacoord/services.go @@ -690,7 +690,7 @@ func (s *Server) GetStateCode() commonpb.StateCode { // GetComponentStates returns DataCoord's current state func (s *Server) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) { code := s.GetStateCode() - log.Debug("DataCoord current state", zap.String("StateCode", code.String())) + log.Ctx(ctx).Debug("DataCoord current state", zap.String("StateCode", code.String())) nodeID := common.NotRegisteredID if s.session != nil && s.session.Registered() { nodeID = s.session.GetServerID() // or Params.NodeID @@ -1528,12 +1528,12 @@ func (s *Server) handleDataNodeTtMsg(ctx context.Context, ttMsg *msgpb.DataNodeT // An error status will be returned and error will be logged, if we failed to mark *all* segments. 
// Deprecated, do not use it func (s *Server) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmentsDroppedRequest) (*commonpb.Status, error) { - log.Info("marking segments dropped", zap.Int64s("segments", req.GetSegmentIds())) + log.Ctx(ctx).Info("marking segments dropped", zap.Int64s("segments", req.GetSegmentIds())) var err error for _, segID := range req.GetSegmentIds() { if err = s.meta.SetState(ctx, segID, commonpb.SegmentState_Dropped); err != nil { // Fail-open. - log.Error("failed to set segment state as dropped", zap.Int64("segmentID", segID)) + log.Ctx(ctx).Error("failed to set segment state as dropped", zap.Int64("segmentID", segID)) break } } @@ -1664,7 +1664,7 @@ func (s *Server) ImportV2(ctx context.Context, in *internalpb.ImportRequestInter Status: merr.Success(), } - log := log.With(zap.Int64("collection", in.GetCollectionID()), + log := log.Ctx(ctx).With(zap.Int64("collection", in.GetCollectionID()), zap.Int64s("partitions", in.GetPartitionIDs()), zap.Strings("channels", in.GetChannelNames())) log.Info("receive import request", zap.Any("files", in.GetFiles()), zap.Any("options", in.GetOptions())) @@ -1749,7 +1749,7 @@ func (s *Server) ImportV2(ctx context.Context, in *internalpb.ImportRequestInter } func (s *Server) GetImportProgress(ctx context.Context, in *internalpb.GetImportProgressRequest) (*internalpb.GetImportProgressResponse, error) { - log := log.With(zap.String("jobID", in.GetJobID())) + log := log.Ctx(ctx).With(zap.String("jobID", in.GetJobID())) if err := merr.CheckHealthy(s.GetStateCode()); err != nil { return &internalpb.GetImportProgressResponse{ Status: merr.Status(err), diff --git a/internal/datanode/services.go b/internal/datanode/services.go index fa14b7d4182a5..825f63a14c1ad 100644 --- a/internal/datanode/services.go +++ b/internal/datanode/services.go @@ -51,7 +51,7 @@ import ( // WatchDmChannels is not in use func (node *DataNode) WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelsRequest) (*commonpb.Status, error) { - log.Warn("DataNode WatchDmChannels is not in use") + log.Ctx(ctx).Warn("DataNode WatchDmChannels is not in use") // TODO ERROR OF GRPC NOT IN USE return merr.Success(), nil @@ -61,7 +61,7 @@ func (node *DataNode) WatchDmChannels(ctx context.Context, in *datapb.WatchDmCha func (node *DataNode) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) { nodeID := common.NotRegisteredID state := node.stateCode.Load().(commonpb.StateCode) - log.Debug("DataNode current state", zap.String("State", state.String())) + log.Ctx(ctx).Debug("DataNode current state", zap.String("State", state.String())) if node.GetSession() != nil && node.session.Registered() { nodeID = node.GetSession().ServerID } @@ -133,9 +133,9 @@ func (node *DataNode) GetStatisticsChannel(ctx context.Context, req *internalpb. 
// ShowConfigurations returns the configurations of DataNode matching req.Pattern func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) { - log.Debug("DataNode.ShowConfigurations", zap.String("pattern", req.Pattern)) + log.Ctx(ctx).Debug("DataNode.ShowConfigurations", zap.String("pattern", req.Pattern)) if err := merr.CheckHealthy(node.GetStateCode()); err != nil { - log.Warn("DataNode.ShowConfigurations failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) + log.Ctx(ctx).Warn("DataNode.ShowConfigurations failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) return &internalpb.ShowConfigurationsResponse{ Status: merr.Status(err), @@ -160,7 +160,7 @@ func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.Sh // GetMetrics return datanode metrics func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) { if err := merr.CheckHealthy(node.GetStateCode()); err != nil { - log.Warn("DataNode.GetMetrics failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) + log.Ctx(ctx).Warn("DataNode.GetMetrics failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) return &milvuspb.GetMetricsResponse{ Status: merr.Status(err), @@ -252,7 +252,7 @@ func (node *DataNode) CompactionV2(ctx context.Context, req *datapb.CompactionPl // return status of all compaction plans func (node *DataNode) GetCompactionState(ctx context.Context, req *datapb.CompactionStateRequest) (*datapb.CompactionStateResponse, error) { if err := merr.CheckHealthy(node.GetStateCode()); err != nil { - log.Warn("DataNode.GetCompactionState failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) + log.Ctx(ctx).Warn("DataNode.GetCompactionState failed", zap.Int64("nodeId", node.GetNodeID()), zap.Error(err)) return &datapb.CompactionStateResponse{ Status: merr.Status(err), }, nil @@ -351,9 +351,9 @@ func (node *DataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegments } func (node *DataNode) NotifyChannelOperation(ctx context.Context, req *datapb.ChannelOperationsRequest) (*commonpb.Status, error) { - log.Ctx(ctx).Info("DataNode receives NotifyChannelOperation", - zap.Int("operation count", len(req.GetInfos()))) + log := log.Ctx(ctx).With(zap.Int("operation count", len(req.GetInfos()))) + log.Info("DataNode receives NotifyChannelOperation") if node.channelManager == nil { log.Warn("DataNode NotifyChannelOperation failed due to nil channelManager") return merr.Status(merr.WrapErrServiceInternal("channelManager is nil! 
Ignore if you are upgrading datanode/coord to rpc based watch")), nil diff --git a/internal/distributed/datanode/client/client.go b/internal/distributed/datanode/client/client.go index 5859e7ee33e5c..c78d9b38ad925 100644 --- a/internal/distributed/datanode/client/client.go +++ b/internal/distributed/datanode/client/client.go @@ -56,7 +56,7 @@ func NewClient(ctx context.Context, addr string, serverID int64) (types.DataNode sess := sessionutil.NewSession(ctx) if sess == nil { err := fmt.Errorf("new session error, maybe can not connect to etcd") - log.Debug("DataNodeClient New Etcd Session failed", zap.Error(err)) + log.Ctx(ctx).Debug("DataNodeClient New Etcd Session failed", zap.Error(err)) return nil, err } config := &Params.DataNodeGrpcClientCfg @@ -77,7 +77,7 @@ func NewClient(ctx context.Context, addr string, serverID int64) (types.DataNode client.grpcClient.EnableEncryption() cp, err := utils.CreateCertPoolforClient(Params.InternalTLSCfg.InternalTLSCaPemPath.GetValue(), "DataNode") if err != nil { - log.Error("Failed to create cert pool for DataNode client") + log.Ctx(ctx).Error("Failed to create cert pool for DataNode client") return nil, err } client.grpcClient.SetInternalTLSCertPool(cp) diff --git a/internal/distributed/querynode/client/client.go b/internal/distributed/querynode/client/client.go index 7dfe4dc8be62a..568aa80c4b846 100644 --- a/internal/distributed/querynode/client/client.go +++ b/internal/distributed/querynode/client/client.go @@ -56,7 +56,7 @@ func NewClient(ctx context.Context, addr string, nodeID int64) (types.QueryNodeC sess := sessionutil.NewSession(ctx) if sess == nil { err := fmt.Errorf("new session error, maybe can not connect to etcd") - log.Debug("QueryNodeClient NewClient failed", zap.Error(err)) + log.Ctx(ctx).Debug("QueryNodeClient NewClient failed", zap.Error(err)) return nil, err } config := ¶mtable.Get().QueryNodeGrpcClientCfg @@ -77,7 +77,7 @@ func NewClient(ctx context.Context, addr string, nodeID int64) (types.QueryNodeC client.grpcClient.EnableEncryption() cp, err := utils.CreateCertPoolforClient(Params.InternalTLSCfg.InternalTLSCaPemPath.GetValue(), "QueryNode") if err != nil { - log.Error("Failed to create cert pool for QueryNode client") + log.Ctx(ctx).Error("Failed to create cert pool for QueryNode client") return nil, err } client.grpcClient.SetInternalTLSCertPool(cp) diff --git a/internal/indexnode/task_scheduler.go b/internal/indexnode/task_scheduler.go index 3f5c986149cad..caab74bcccdfd 100644 --- a/internal/indexnode/task_scheduler.go +++ b/internal/indexnode/task_scheduler.go @@ -124,7 +124,7 @@ func (queue *IndexTaskQueue) PopActiveTask(tName string) task { delete(queue.activeTasks, tName) return t } - log.Debug("IndexNode task was not found in the active task list", zap.String("TaskName", tName)) + log.Ctx(queue.sched.ctx).Debug("IndexNode task was not found in the active task list", zap.String("TaskName", tName)) return nil } @@ -246,7 +246,7 @@ func (sched *TaskScheduler) processTask(t task, q TaskQueue) { } func (sched *TaskScheduler) indexBuildLoop() { - log.Debug("IndexNode TaskScheduler start build loop ...") + log.Ctx(sched.ctx).Debug("IndexNode TaskScheduler start build loop ...") defer sched.wg.Done() for { select { diff --git a/internal/indexnode/task_stats.go b/internal/indexnode/task_stats.go index 336e6354a6511..0ace1334a62a6 100644 --- a/internal/indexnode/task_stats.go +++ b/internal/indexnode/task_stats.go @@ -127,13 +127,13 @@ func (st *statsTask) PreExecute(ctx context.Context) error { if err := 
binlog.DecompressBinLog(storage.InsertBinlog, st.req.GetCollectionID(), st.req.GetPartitionID(), st.req.GetSegmentID(), st.req.GetInsertLogs()); err != nil { - log.Warn("Decompress insert binlog error", zap.Error(err)) + log.Ctx(ctx).Warn("Decompress insert binlog error", zap.Error(err)) return err } if err := binlog.DecompressBinLog(storage.DeleteBinlog, st.req.GetCollectionID(), st.req.GetPartitionID(), st.req.GetSegmentID(), st.req.GetDeltaLogs()); err != nil { - log.Warn("Decompress delta binlog error", zap.Error(err)) + log.Ctx(ctx).Warn("Decompress delta binlog error", zap.Error(err)) return err } @@ -162,7 +162,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er bm25FieldIds := compaction.GetBM25FieldIDs(st.req.GetSchema()) writer, err := compaction.NewSegmentWriter(st.req.GetSchema(), numRows, statsBatchSize, st.req.GetTargetSegmentID(), st.req.GetPartitionID(), st.req.GetCollectionID(), bm25FieldIds) if err != nil { - log.Warn("sort segment wrong, unable to init segment writer", + log.Ctx(ctx).Warn("sort segment wrong, unable to init segment writer", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } @@ -181,7 +181,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er downloadStart := time.Now() values, err := st.downloadData(ctx, numRows, writer.GetPkID(), bm25FieldIds) if err != nil { - log.Warn("download data failed", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) + log.Ctx(ctx).Warn("download data failed", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } downloadCost = time.Since(downloadStart) @@ -195,7 +195,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er for i, v := range values { err := writer.Write(v) if err != nil { - log.Warn("write value wrong, failed to writer row", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) + log.Ctx(ctx).Warn("write value wrong, failed to writer row", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } @@ -203,7 +203,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er serWriteStart := time.Now() binlogNum, kvs, partialBinlogs, err := serializeWrite(ctx, st.req.GetStartLogID()+st.logIDOffset, writer) if err != nil { - log.Warn("stats wrong, failed to serialize writer", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) + log.Ctx(ctx).Warn("stats wrong, failed to serialize writer", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } serWriteTimeCost += time.Since(serWriteStart) @@ -214,7 +214,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er flushBatchCount++ st.logIDOffset += binlogNum if st.req.GetStartLogID()+st.logIDOffset >= st.req.GetEndLogID() { - log.Warn("binlog files too much, log is not enough", zap.Int64("taskID", st.req.GetTaskID()), + log.Ctx(ctx).Warn("binlog files too much, log is not enough", zap.Int64("taskID", st.req.GetTaskID()), zap.Int64("binlog num", binlogNum), zap.Int64("startLogID", st.req.GetStartLogID()), zap.Int64("endLogID", st.req.GetEndLogID()), zap.Int64("logIDOffset", st.logIDOffset)) return nil, fmt.Errorf("binlog files too much, log is not enough") @@ -226,7 +226,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er serWriteStart := time.Now() binlogNum, kvs, partialBinlogs, err := serializeWrite(ctx, st.req.GetStartLogID()+st.logIDOffset, writer) if err != nil { - log.Warn("stats wrong, 
failed to serialize writer", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) + log.Ctx(ctx).Warn("stats wrong, failed to serialize writer", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } serWriteTimeCost += time.Since(serWriteStart) @@ -239,14 +239,14 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er err = conc.AwaitAll(uploadFutures...) if err != nil { - log.Warn("stats wrong, failed to upload kvs", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) + log.Ctx(ctx).Warn("stats wrong, failed to upload kvs", zap.Int64("taskID", st.req.GetTaskID()), zap.Error(err)) return nil, err } serWriteStart := time.Now() binlogNums, sPath, err := statSerializeWrite(ctx, st.binlogIO, st.req.GetStartLogID()+st.logIDOffset, writer, numRows) if err != nil { - log.Warn("stats wrong, failed to serialize write segment stats", zap.Int64("taskID", st.req.GetTaskID()), + log.Ctx(ctx).Warn("stats wrong, failed to serialize write segment stats", zap.Int64("taskID", st.req.GetTaskID()), zap.Int64("remaining row count", numRows), zap.Error(err)) return nil, err } @@ -258,7 +258,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er if len(bm25FieldIds) > 0 { binlogNums, bm25StatsLogs, err = bm25SerializeWrite(ctx, st.binlogIO, st.req.GetStartLogID()+st.logIDOffset, writer, numRows) if err != nil { - log.Warn("compact wrong, failed to serialize write segment bm25 stats", zap.Error(err)) + log.Ctx(ctx).Warn("compact wrong, failed to serialize write segment bm25 stats", zap.Error(err)) return nil, err } st.logIDOffset += binlogNums @@ -288,7 +288,7 @@ func (st *statsTask) sortSegment(ctx context.Context) ([]*datapb.FieldBinlog, er st.req.GetInsertChannel(), int64(len(values)), insertLogs, statsLogs, bm25StatsLogs) - log.Info("sort segment end", + log.Ctx(ctx).Info("sort segment end", zap.String("clusterID", st.req.GetClusterID()), zap.Int64("taskID", st.req.GetTaskID()), zap.Int64("collectionID", st.req.GetCollectionID()), @@ -330,7 +330,7 @@ func (st *statsTask) Execute(ctx context.Context) error { st.req.GetTaskID(), insertLogs) if err != nil { - log.Warn("stats wrong, failed to create text index", zap.Error(err)) + log.Ctx(ctx).Warn("stats wrong, failed to create text index", zap.Error(err)) return err } } @@ -569,7 +569,7 @@ func statSerializeWrite(ctx context.Context, io io.BinlogIO, startID int64, writ }, } if err := io.Upload(ctx, kvs); err != nil { - log.Warn("failed to upload insert log", zap.Error(err)) + log.Ctx(ctx).Warn("failed to upload insert log", zap.Error(err)) return binlogNum, nil, err } @@ -607,7 +607,7 @@ func bm25SerializeWrite(ctx context.Context, io io.BinlogIO, startID int64, writ } if err := io.Upload(ctx, kvs); err != nil { - log.Warn("failed to upload bm25 log", zap.Error(err)) + log.Ctx(ctx).Warn("failed to upload bm25 log", zap.Error(err)) return 0, nil, err } diff --git a/internal/metastore/kv/datacoord/kv_catalog.go b/internal/metastore/kv/datacoord/kv_catalog.go index 6b723467e0dad..3a4d3b4a137a6 100644 --- a/internal/metastore/kv/datacoord/kv_catalog.go +++ b/internal/metastore/kv/datacoord/kv_catalog.go @@ -330,7 +330,7 @@ func (kc *Catalog) AlterSegments(ctx context.Context, segments []*datapb.Segment maps.Copy(kvs, binlogKvs) } - return kc.SaveByBatch(kvs) + return kc.SaveByBatch(ctx, kvs) } func (kc *Catalog) handleDroppedSegment(segment *datapb.SegmentInfo) (kvs map[string]string, err error) { @@ -349,13 +349,13 @@ func (kc *Catalog) handleDroppedSegment(segment 
*datapb.SegmentInfo) (kvs map[st return } -func (kc *Catalog) SaveByBatch(kvs map[string]string) error { +func (kc *Catalog) SaveByBatch(ctx context.Context, kvs map[string]string) error { saveFn := func(partialKvs map[string]string) error { return kc.MetaKv.MultiSave(partialKvs) } err := etcd.SaveByBatchWithLimit(kvs, util.MaxEtcdTxnNum, saveFn) if err != nil { - log.Error("failed to save by batch", zap.Error(err)) + log.Ctx(ctx).Error("failed to save by batch", zap.Error(err)) return err } return nil @@ -448,10 +448,10 @@ func (kc *Catalog) MarkChannelAdded(ctx context.Context, channel string) error { key := buildChannelRemovePath(channel) err := kc.MetaKv.Save(key, NonRemoveFlagTomestone) if err != nil { - log.Error("failed to mark channel added", zap.String("channel", channel), zap.Error(err)) + log.Ctx(ctx).Error("failed to mark channel added", zap.String("channel", channel), zap.Error(err)) return err } - log.Info("NON remove flag tombstone added", zap.String("channel", channel)) + log.Ctx(ctx).Info("NON remove flag tombstone added", zap.String("channel", channel)) return nil } @@ -459,10 +459,10 @@ func (kc *Catalog) MarkChannelDeleted(ctx context.Context, channel string) error key := buildChannelRemovePath(channel) err := kc.MetaKv.Save(key, RemoveFlagTomestone) if err != nil { - log.Error("Failed to mark channel dropped", zap.String("channel", channel), zap.Error(err)) + log.Ctx(ctx).Error("Failed to mark channel dropped", zap.String("channel", channel), zap.Error(err)) return err } - log.Info("remove flag tombstone added", zap.String("channel", channel)) + log.Ctx(ctx).Info("remove flag tombstone added", zap.String("channel", channel)) return nil } @@ -484,7 +484,7 @@ func (kc *Catalog) ChannelExists(ctx context.Context, channel string) bool { // DropChannel removes channel remove flag after whole procedure is finished func (kc *Catalog) DropChannel(ctx context.Context, channel string) error { key := buildChannelRemovePath(channel) - log.Info("removing channel remove path", zap.String("channel", channel)) + log.Ctx(ctx).Info("removing channel remove path", zap.String("channel", channel)) return kc.MetaKv.Remove(key) } @@ -500,7 +500,7 @@ func (kc *Catalog) ListChannelCheckpoint(ctx context.Context) (map[string]*msgpb channelCP := &msgpb.MsgPosition{} err = proto.Unmarshal([]byte(value), channelCP) if err != nil { - log.Error("unmarshal channelCP failed when ListChannelCheckpoint", zap.Error(err)) + log.Ctx(ctx).Error("unmarshal channelCP failed when ListChannelCheckpoint", zap.Error(err)) return nil, err } ss := strings.Split(key, "/") @@ -530,7 +530,7 @@ func (kc *Catalog) SaveChannelCheckpoints(ctx context.Context, positions []*msgp } kvs[k] = string(v) } - return kc.SaveByBatch(kvs) + return kc.SaveByBatch(ctx, kvs) } func (kc *Catalog) DropChannelCheckpoint(ctx context.Context, vChannel string) error { @@ -577,7 +577,7 @@ func (kc *Catalog) CreateIndex(ctx context.Context, index *model.Index) error { func (kc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) { _, values, err := kc.MetaKv.LoadWithPrefix(util.FieldIndexPrefix) if err != nil { - log.Error("list index meta fail", zap.String("prefix", util.FieldIndexPrefix), zap.Error(err)) + log.Ctx(ctx).Error("list index meta fail", zap.String("prefix", util.FieldIndexPrefix), zap.Error(err)) return nil, err } @@ -586,7 +586,7 @@ func (kc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) { meta := &indexpb.FieldIndex{} err = proto.Unmarshal([]byte(value), meta) if err != nil { - 
log.Warn("unmarshal index info failed", zap.Error(err)) + log.Ctx(ctx).Warn("unmarshal index info failed", zap.Error(err)) return nil, err } @@ -627,7 +627,7 @@ func (kc *Catalog) DropIndex(ctx context.Context, collID typeutil.UniqueID, drop err := kc.MetaKv.Remove(key) if err != nil { - log.Error("drop collection index meta fail", zap.Int64("collectionID", collID), + log.Ctx(ctx).Error("drop collection index meta fail", zap.Int64("collectionID", collID), zap.Int64("indexID", dropIdxID), zap.Error(err)) return err } @@ -644,7 +644,7 @@ func (kc *Catalog) CreateSegmentIndex(ctx context.Context, segIdx *model.Segment } err = kc.MetaKv.Save(key, string(value)) if err != nil { - log.Error("failed to save segment index meta in etcd", zap.Int64("buildID", segIdx.BuildID), + log.Ctx(ctx).Error("failed to save segment index meta in etcd", zap.Int64("buildID", segIdx.BuildID), zap.Int64("segmentID", segIdx.SegmentID), zap.Error(err)) return err } @@ -654,7 +654,7 @@ func (kc *Catalog) CreateSegmentIndex(ctx context.Context, segIdx *model.Segment func (kc *Catalog) ListSegmentIndexes(ctx context.Context) ([]*model.SegmentIndex, error) { _, values, err := kc.MetaKv.LoadWithPrefix(util.SegmentIndexPrefix) if err != nil { - log.Error("list segment index meta fail", zap.String("prefix", util.SegmentIndexPrefix), zap.Error(err)) + log.Ctx(ctx).Error("list segment index meta fail", zap.String("prefix", util.SegmentIndexPrefix), zap.Error(err)) return nil, err } @@ -663,7 +663,7 @@ func (kc *Catalog) ListSegmentIndexes(ctx context.Context) ([]*model.SegmentInde segmentIndexInfo := &indexpb.SegmentIndex{} err = proto.Unmarshal([]byte(value), segmentIndexInfo) if err != nil { - log.Warn("unmarshal segment index info failed", zap.Error(err)) + log.Ctx(ctx).Warn("unmarshal segment index info failed", zap.Error(err)) return segIndexes, err } @@ -691,7 +691,7 @@ func (kc *Catalog) DropSegmentIndex(ctx context.Context, collID, partID, segID, err := kc.MetaKv.Remove(key) if err != nil { - log.Error("drop segment index meta fail", zap.Int64("buildID", buildID), zap.Error(err)) + log.Ctx(ctx).Error("drop segment index meta fail", zap.Int64("buildID", buildID), zap.Error(err)) return err } @@ -838,7 +838,7 @@ func (kc *Catalog) SaveCompactionTask(ctx context.Context, coll *datapb.Compacti } kvs := make(map[string]string) kvs[k] = v - return kc.SaveByBatch(kvs) + return kc.SaveByBatch(ctx, kvs) } func (kc *Catalog) DropCompactionTask(ctx context.Context, task *datapb.CompactionTask) error { @@ -913,7 +913,7 @@ func (kc *Catalog) SavePartitionStatsInfo(ctx context.Context, coll *datapb.Part } kvs := make(map[string]string) kvs[k] = v - return kc.SaveByBatch(kvs) + return kc.SaveByBatch(ctx, kvs) } func (kc *Catalog) DropPartitionStatsInfo(ctx context.Context, info *datapb.PartitionStatsInfo) error { diff --git a/internal/metastore/kv/querycoord/kv_catalog.go b/internal/metastore/kv/querycoord/kv_catalog.go index 2c7d5edc38f41..5355df0084640 100644 --- a/internal/metastore/kv/querycoord/kv_catalog.go +++ b/internal/metastore/kv/querycoord/kv_catalog.go @@ -8,7 +8,6 @@ import ( "github.com/cockroachdb/errors" "github.com/klauspost/compress/zstd" - "github.com/pingcap/log" "github.com/samber/lo" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -16,6 +15,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/pkg/kv" + "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/util/compressor" ) diff --git 
a/internal/metastore/kv/rootcoord/kv_catalog.go b/internal/metastore/kv/rootcoord/kv_catalog.go index 7bc1e4e170496..b26b2d47fb99a 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog.go +++ b/internal/metastore/kv/rootcoord/kv_catalog.go @@ -322,13 +322,13 @@ func (kc *Catalog) CreateCredential(ctx context.Context, credential *model.Crede k := fmt.Sprintf("%s/%s", CredentialPrefix, credential.Username) v, err := json.Marshal(&internalpb.CredentialInfo{EncryptedPassword: credential.EncryptedPassword}) if err != nil { - log.Error("create credential marshal fail", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("create credential marshal fail", zap.String("key", k), zap.Error(err)) return err } err = kc.Txn.Save(k, string(v)) if err != nil { - log.Error("create credential persist meta fail", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("create credential persist meta fail", zap.String("key", k), zap.Error(err)) return err } @@ -561,9 +561,9 @@ func (kc *Catalog) GetCredential(ctx context.Context, username string) (*model.C v, err := kc.Txn.Load(k) if err != nil { if errors.Is(err, merr.ErrIoKeyNotFound) { - log.Debug("not found the user", zap.String("key", k)) + log.Ctx(ctx).Debug("not found the user", zap.String("key", k)) } else { - log.Warn("get credential meta fail", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("get credential meta fail", zap.String("key", k), zap.Error(err)) } return nil, err } @@ -732,7 +732,7 @@ func (kc *Catalog) DropCredential(ctx context.Context, username string) error { k := fmt.Sprintf("%s/%s", CredentialPrefix, username) userResults, err := kc.ListUser(ctx, util.DefaultTenant, &milvuspb.UserEntity{Name: username}, true) if err != nil && !errors.Is(err, merr.ErrIoKeyNotFound) { - log.Warn("fail to list user", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to list user", zap.String("key", k), zap.Error(err)) return err } deleteKeys := make([]string, 0, len(userResults)+1) @@ -747,7 +747,7 @@ func (kc *Catalog) DropCredential(ctx context.Context, username string) error { } err = kc.Txn.MultiRemove(deleteKeys) if err != nil { - log.Warn("fail to drop credential", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to drop credential", zap.String("key", k), zap.Error(err)) return err } @@ -765,7 +765,7 @@ func (kc *Catalog) GetCollectionByName(ctx context.Context, dbID int64, collecti prefix := getDatabasePrefix(dbID) _, vals, err := kc.Snapshot.LoadWithPrefix(prefix, ts) if err != nil { - log.Warn("get collection meta fail", zap.String("collectionName", collectionName), zap.Error(err)) + log.Ctx(ctx).Warn("get collection meta fail", zap.String("collectionName", collectionName), zap.Error(err)) return nil, err } @@ -773,7 +773,7 @@ func (kc *Catalog) GetCollectionByName(ctx context.Context, dbID int64, collecti colMeta := pb.CollectionInfo{} err = proto.Unmarshal([]byte(val), &colMeta) if err != nil { - log.Warn("get collection meta unmarshal fail", zap.String("collectionName", collectionName), zap.Error(err)) + log.Ctx(ctx).Warn("get collection meta unmarshal fail", zap.String("collectionName", collectionName), zap.Error(err)) continue } if colMeta.Schema.Name == collectionName { @@ -789,7 +789,7 @@ func (kc *Catalog) ListCollections(ctx context.Context, dbID int64, ts typeutil. 
prefix := getDatabasePrefix(dbID) _, vals, err := kc.Snapshot.LoadWithPrefix(prefix, ts) if err != nil { - log.Error("get collections meta fail", + log.Ctx(ctx).Error("get collections meta fail", zap.String("prefix", prefix), zap.Uint64("timestamp", ts), zap.Error(err)) @@ -802,17 +802,17 @@ func (kc *Catalog) ListCollections(ctx context.Context, dbID int64, ts typeutil. collMeta := &pb.CollectionInfo{} err := proto.Unmarshal([]byte(val), collMeta) if err != nil { - log.Warn("unmarshal collection info failed", zap.Error(err)) + log.Ctx(ctx).Warn("unmarshal collection info failed", zap.Error(err)) continue } kc.fixDefaultDBIDConsistency(ctx, collMeta, ts) colls = append(colls, collMeta) } - log.Info("unmarshal all collection details cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) + log.Ctx(ctx).Info("unmarshal all collection details cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) start = time.Now() ret, err := kc.batchAppendPartitionAndFieldsInfo(ctx, colls, ts) - log.Info("append partition and fields info cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) + log.Ctx(ctx).Info("append partition and fields info cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) if err != nil { return nil, err } @@ -919,7 +919,7 @@ func (kc *Catalog) ListCredentials(ctx context.Context) ([]string, error) { func (kc *Catalog) ListCredentialsWithPasswd(ctx context.Context) (map[string]string, error) { keys, values, err := kc.Txn.LoadWithPrefix(CredentialPrefix) if err != nil { - log.Error("list all credential usernames fail", zap.String("prefix", CredentialPrefix), zap.Error(err)) + log.Ctx(ctx).Error("list all credential usernames fail", zap.String("prefix", CredentialPrefix), zap.Error(err)) return nil, err } @@ -927,13 +927,13 @@ func (kc *Catalog) ListCredentialsWithPasswd(ctx context.Context) (map[string]st for i := range keys { username := typeutil.After(keys[i], UserSubPrefix+"/") if len(username) == 0 { - log.Warn("no username extract from path:", zap.String("path", keys[i])) + log.Ctx(ctx).Warn("no username extract from path:", zap.String("path", keys[i])) continue } credential := &internalpb.CredentialInfo{} err := json.Unmarshal([]byte(values[i]), credential) if err != nil { - log.Error("credential unmarshal fail", zap.String("key", keys[i]), zap.Error(err)) + log.Ctx(ctx).Error("credential unmarshal fail", zap.String("key", keys[i]), zap.Error(err)) return nil, err } users[username] = credential.EncryptedPassword @@ -942,24 +942,25 @@ func (kc *Catalog) ListCredentialsWithPasswd(ctx context.Context) (map[string]st return users, nil } -func (kc *Catalog) save(k string) error { +func (kc *Catalog) save(ctx context.Context, k string) error { var err error if _, err = kc.Txn.Load(k); err != nil && !errors.Is(err, merr.ErrIoKeyNotFound) { return err } if err == nil { - log.Debug("the key has existed", zap.String("key", k)) + log.Ctx(ctx).Debug("the key has existed", zap.String("key", k)) return common.NewIgnorableError(fmt.Errorf("the key[%s] has existed", k)) } return kc.Txn.Save(k, "") } -func (kc *Catalog) remove(k string) error { +func (kc *Catalog) remove(ctx context.Context, k string) error { var err error if _, err = kc.Txn.Load(k); err != nil && !errors.Is(err, merr.ErrIoKeyNotFound) { return err } if err != nil && errors.Is(err, merr.ErrIoKeyNotFound) { + log.Ctx(ctx).Debug("the key isn't existed", zap.String("key", k)) return common.NewIgnorableError(fmt.Errorf("the key[%s] isn't existed", k)) } return 
kc.Txn.Remove(k) @@ -967,9 +968,9 @@ func (kc *Catalog) remove(k string) error { func (kc *Catalog) CreateRole(ctx context.Context, tenant string, entity *milvuspb.RoleEntity) error { k := funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, entity.Name) - err := kc.save(k) + err := kc.save(ctx, k) if err != nil && !common.IsIgnorableError(err) { - log.Warn("fail to save the role", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to save the role", zap.String("key", k), zap.Error(err)) } return err } @@ -978,7 +979,7 @@ func (kc *Catalog) DropRole(ctx context.Context, tenant string, roleName string) k := funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, roleName) roleResults, err := kc.ListRole(ctx, tenant, &milvuspb.RoleEntity{Name: roleName}, true) if err != nil && !errors.Is(err, merr.ErrIoKeyNotFound) { - log.Warn("fail to list role", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to list role", zap.String("key", k), zap.Error(err)) return err } @@ -995,7 +996,7 @@ func (kc *Catalog) DropRole(ctx context.Context, tenant string, roleName string) err = kc.Txn.MultiRemove(deleteKeys) if err != nil { - log.Warn("fail to drop role", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to drop role", zap.String("key", k), zap.Error(err)) return err } return nil @@ -1005,14 +1006,14 @@ func (kc *Catalog) AlterUserRole(ctx context.Context, tenant string, userEntity k := funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, tenant, fmt.Sprintf("%s/%s", userEntity.Name, roleEntity.Name)) var err error if operateType == milvuspb.OperateUserRoleType_AddUserToRole { - err = kc.save(k) + err = kc.save(ctx, k) if err != nil { - log.Error("fail to save the user-role", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to save the user-role", zap.String("key", k), zap.Error(err)) } } else if operateType == milvuspb.OperateUserRoleType_RemoveUserFromRole { - err = kc.remove(k) + err = kc.remove(ctx, k) if err != nil { - log.Error("fail to remove the user-role", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to remove the user-role", zap.String("key", k), zap.Error(err)) } } else { err = fmt.Errorf("invalid operate user role type, operate type: %d", operateType) @@ -1028,14 +1029,14 @@ func (kc *Catalog) ListRole(ctx context.Context, tenant string, entity *milvuspb roleMappingKey := funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, tenant, "") keys, _, err := kc.Txn.LoadWithPrefix(roleMappingKey) if err != nil { - log.Error("fail to load role mappings", zap.String("key", roleMappingKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load role mappings", zap.String("key", roleMappingKey), zap.Error(err)) return results, err } for _, key := range keys { roleMappingInfos := typeutil.AfterN(key, roleMappingKey+"/", "/") if len(roleMappingInfos) != 2 { - log.Warn("invalid role mapping key", zap.String("string", key), zap.String("sub_string", roleMappingKey)) + log.Ctx(ctx).Warn("invalid role mapping key", zap.String("string", key), zap.String("sub_string", roleMappingKey)) continue } username := roleMappingInfos[0] @@ -1059,13 +1060,13 @@ func (kc *Catalog) ListRole(ctx context.Context, tenant string, entity *milvuspb roleKey := funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, "") keys, _, err := kc.Txn.LoadWithPrefix(roleKey) if err != nil { - log.Error("fail to load roles", zap.String("key", roleKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load roles", zap.String("key", roleKey), zap.Error(err)) return results, err } for _, 
key := range keys { infoArr := typeutil.AfterN(key, roleKey+"/", "/") if len(infoArr) != 1 || len(infoArr[0]) == 0 { - log.Warn("invalid role key", zap.String("string", key), zap.String("sub_string", roleKey)) + log.Ctx(ctx).Warn("invalid role key", zap.String("string", key), zap.String("sub_string", roleKey)) continue } appendRoleResult(infoArr[0]) @@ -1077,7 +1078,7 @@ func (kc *Catalog) ListRole(ctx context.Context, tenant string, entity *milvuspb roleKey := funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, entity.Name) _, err := kc.Txn.Load(roleKey) if err != nil { - log.Warn("fail to load a role", zap.String("key", roleKey), zap.Error(err)) + log.Ctx(ctx).Warn("fail to load a role", zap.String("key", roleKey), zap.Error(err)) return results, err } appendRoleResult(entity.Name) @@ -1086,18 +1087,18 @@ func (kc *Catalog) ListRole(ctx context.Context, tenant string, entity *milvuspb return results, nil } -func (kc *Catalog) getRolesByUsername(tenant string, username string) ([]string, error) { +func (kc *Catalog) getRolesByUsername(ctx context.Context, tenant string, username string) ([]string, error) { var roles []string k := funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, tenant, username) keys, _, err := kc.Txn.LoadWithPrefix(k) if err != nil { - log.Error("fail to load role mappings by the username", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to load role mappings by the username", zap.String("key", k), zap.Error(err)) return roles, err } for _, key := range keys { roleMappingInfos := typeutil.AfterN(key, k+"/", "/") if len(roleMappingInfos) != 1 { - log.Warn("invalid role mapping key", zap.String("string", key), zap.String("sub_string", k)) + log.Ctx(ctx).Warn("invalid role mapping key", zap.String("string", key), zap.String("sub_string", k)) continue } roles = append(roles, roleMappingInfos[0]) @@ -1106,14 +1107,14 @@ func (kc *Catalog) getRolesByUsername(tenant string, username string) ([]string, } // getUserResult get the user result by the username. And never return the error because the error means the user isn't added to a role. 
-func (kc *Catalog) getUserResult(tenant string, username string, includeRoleInfo bool) (*milvuspb.UserResult, error) { +func (kc *Catalog) getUserResult(ctx context.Context, tenant string, username string, includeRoleInfo bool) (*milvuspb.UserResult, error) { result := &milvuspb.UserResult{User: &milvuspb.UserEntity{Name: username}} if !includeRoleInfo { return result, nil } - roleNames, err := kc.getRolesByUsername(tenant, username) + roleNames, err := kc.getRolesByUsername(ctx, tenant, username) if err != nil { - log.Warn("fail to get roles by the username", zap.Error(err)) + log.Ctx(ctx).Warn("fail to get roles by the username", zap.Error(err)) return result, err } var roles []*milvuspb.RoleEntity @@ -1132,7 +1133,7 @@ func (kc *Catalog) ListUser(ctx context.Context, tenant string, entity *milvuspb ) appendUserResult := func(username string) error { - result, err := kc.getUserResult(tenant, username, includeRoleInfo) + result, err := kc.getUserResult(ctx, tenant, username, includeRoleInfo) if err != nil { return err } @@ -1184,7 +1185,7 @@ func (kc *Catalog) AlterGrant(ctx context.Context, tenant string, entity *milvus if v, err = kc.Txn.Load(k); err == nil { idStr = v } else { - log.Warn("fail to load grant privilege entity", zap.String("key", k), zap.Any("type", operateType), zap.Error(err)) + log.Ctx(ctx).Warn("fail to load grant privilege entity", zap.String("key", k), zap.Any("type", operateType), zap.Error(err)) if funcutil.IsRevoke(operateType) { if errors.Is(err, merr.ErrIoKeyNotFound) { return common.NewIgnorableError(fmt.Errorf("the grant[%s] isn't existed", k)) @@ -1198,7 +1199,7 @@ func (kc *Catalog) AlterGrant(ctx context.Context, tenant string, entity *milvus idStr = crypto.MD5(k) err = kc.Txn.Save(k, idStr) if err != nil { - log.Error("fail to allocate id when altering the grant", zap.Error(err)) + log.Ctx(ctx).Error("fail to allocate id when altering the grant", zap.Error(err)) return err } } @@ -1206,18 +1207,18 @@ func (kc *Catalog) AlterGrant(ctx context.Context, tenant string, entity *milvus k = funcutil.HandleTenantForEtcdKey(GranteeIDPrefix, tenant, fmt.Sprintf("%s/%s", idStr, privilegeName)) _, err = kc.Txn.Load(k) if err != nil { - log.Warn("fail to load the grantee id", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to load the grantee id", zap.String("key", k), zap.Error(err)) if !errors.Is(err, merr.ErrIoKeyNotFound) { log.Warn("fail to load the grantee id", zap.String("key", k), zap.Error(err)) return err } - log.Debug("not found the grantee id", zap.String("key", k)) + log.Ctx(ctx).Debug("not found the grantee id", zap.String("key", k)) if funcutil.IsRevoke(operateType) { return common.NewIgnorableError(fmt.Errorf("the grantee-id[%s] isn't existed", k)) } if funcutil.IsGrant(operateType) { if err = kc.Txn.Save(k, entity.Grantor.User.Name); err != nil { - log.Error("fail to save the grantee id", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to save the grantee id", zap.String("key", k), zap.Error(err)) } return err } @@ -1225,7 +1226,7 @@ func (kc *Catalog) AlterGrant(ctx context.Context, tenant string, entity *milvus } if funcutil.IsRevoke(operateType) { if err = kc.Txn.Remove(k); err != nil { - log.Error("fail to remove the grantee id", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to remove the grantee id", zap.String("key", k), zap.Error(err)) return err } return err @@ -1246,13 +1247,13 @@ func (kc *Catalog) ListGrant(ctx context.Context, tenant string, entity *milvusp granteeIDKey := 
funcutil.HandleTenantForEtcdKey(GranteeIDPrefix, tenant, v) keys, values, err := kc.Txn.LoadWithPrefix(granteeIDKey) if err != nil { - log.Error("fail to load the grantee ids", zap.String("key", granteeIDKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load the grantee ids", zap.String("key", granteeIDKey), zap.Error(err)) return err } for i, key := range keys { granteeIDInfos := typeutil.AfterN(key, granteeIDKey+"/", "/") if len(granteeIDInfos) != 1 { - log.Warn("invalid grantee id", zap.String("string", key), zap.String("sub_string", granteeIDKey)) + log.Ctx(ctx).Warn("invalid grantee id", zap.String("string", key), zap.String("sub_string", granteeIDKey)) continue } privilegeName := util.PrivilegeNameForAPI(granteeIDInfos[0]) @@ -1296,7 +1297,7 @@ func (kc *Catalog) ListGrant(ctx context.Context, tenant string, entity *milvusp granteeKey = funcutil.HandleTenantForEtcdKey(GranteePrefix, tenant, fmt.Sprintf("%s/%s/%s", entity.Role.Name, entity.Object.Name, funcutil.CombineObjectName(entity.DbName, entity.ObjectName))) v, err := kc.Txn.Load(granteeKey) if err != nil { - log.Error("fail to load the grant privilege entity", zap.String("key", granteeKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load the grant privilege entity", zap.String("key", granteeKey), zap.Error(err)) return entities, err } err = appendGrantEntity(v, entity.Object.Name, funcutil.CombineObjectName(entity.DbName, entity.ObjectName)) @@ -1307,13 +1308,13 @@ func (kc *Catalog) ListGrant(ctx context.Context, tenant string, entity *milvusp granteeKey = funcutil.HandleTenantForEtcdKey(GranteePrefix, tenant, entity.Role.Name) keys, values, err := kc.Txn.LoadWithPrefix(granteeKey) if err != nil { - log.Error("fail to load grant privilege entities", zap.String("key", granteeKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load grant privilege entities", zap.String("key", granteeKey), zap.Error(err)) return entities, err } for i, key := range keys { grantInfos := typeutil.AfterN(key, granteeKey+"/", "/") if len(grantInfos) != 2 { - log.Warn("invalid grantee key", zap.String("string", key), zap.String("sub_string", granteeKey)) + log.Ctx(ctx).Warn("invalid grantee key", zap.String("string", key), zap.String("sub_string", granteeKey)) continue } err = appendGrantEntity(values[i], grantInfos[0], grantInfos[1]) @@ -1338,7 +1339,7 @@ func (kc *Catalog) DeleteGrant(ctx context.Context, tenant string, role *milvusp // the values are the grantee id list _, values, err := kc.Txn.LoadWithPrefix(k) if err != nil { - log.Warn("fail to load grant privilege entities", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to load grant privilege entities", zap.String("key", k), zap.Error(err)) return err } for _, v := range values { @@ -1347,7 +1348,7 @@ func (kc *Catalog) DeleteGrant(ctx context.Context, tenant string, role *milvusp } if err = kc.Txn.MultiSaveAndRemoveWithPrefix(nil, removeKeys); err != nil { - log.Error("fail to remove with the prefix", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to remove with the prefix", zap.String("key", k), zap.Error(err)) } return err } @@ -1357,26 +1358,26 @@ func (kc *Catalog) ListPolicy(ctx context.Context, tenant string) ([]string, err granteeKey := funcutil.HandleTenantForEtcdKey(GranteePrefix, tenant, "") keys, values, err := kc.Txn.LoadWithPrefix(granteeKey) if err != nil { - log.Error("fail to load all grant privilege entities", zap.String("key", granteeKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load all grant privilege entities", 
zap.String("key", granteeKey), zap.Error(err)) return []string{}, err } for i, key := range keys { grantInfos := typeutil.AfterN(key, granteeKey+"/", "/") if len(grantInfos) != 3 { - log.Warn("invalid grantee key", zap.String("string", key), zap.String("sub_string", granteeKey)) + log.Ctx(ctx).Warn("invalid grantee key", zap.String("string", key), zap.String("sub_string", granteeKey)) continue } granteeIDKey := funcutil.HandleTenantForEtcdKey(GranteeIDPrefix, tenant, values[i]) idKeys, _, err := kc.Txn.LoadWithPrefix(granteeIDKey) if err != nil { - log.Error("fail to load the grantee ids", zap.String("key", granteeIDKey), zap.Error(err)) + log.Ctx(ctx).Error("fail to load the grantee ids", zap.String("key", granteeIDKey), zap.Error(err)) return []string{}, err } for _, idKey := range idKeys { granteeIDInfos := typeutil.AfterN(idKey, granteeIDKey+"/", "/") if len(granteeIDInfos) != 1 { - log.Warn("invalid grantee id", zap.String("string", idKey), zap.String("sub_string", granteeIDKey)) + log.Ctx(ctx).Warn("invalid grantee id", zap.String("string", idKey), zap.String("sub_string", granteeIDKey)) continue } dbName, objectName := funcutil.SplitObjectName(grantInfos[2]) @@ -1392,14 +1393,14 @@ func (kc *Catalog) ListUserRole(ctx context.Context, tenant string) ([]string, e k := funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, tenant, "") keys, _, err := kc.Txn.LoadWithPrefix(k) if err != nil { - log.Error("fail to load all user-role mappings", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Error("fail to load all user-role mappings", zap.String("key", k), zap.Error(err)) return []string{}, err } for _, key := range keys { userRolesInfos := typeutil.AfterN(key, k+"/", "/") if len(userRolesInfos) != 2 { - log.Warn("invalid user-role key", zap.String("string", key), zap.String("sub_string", k)) + log.Ctx(ctx).Warn("invalid user-role key", zap.String("string", key), zap.String("sub_string", k)) continue } userRoles = append(userRoles, funcutil.EncodeUserRoleCache(userRolesInfos[0], userRolesInfos[1])) @@ -1477,12 +1478,12 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp needRollbackPrivilegeGroups := make([]*milvuspb.PrivilegeGroupInfo, 0) defer func() { if err != nil { - log.Warn("failed to restore rbac, try to rollback", zap.Error(err)) + log.Ctx(ctx).Warn("failed to restore rbac, try to rollback", zap.Error(err)) // roll back role for _, role := range needRollbackRole { err = kc.DropRole(ctx, tenant, role.Name) if err != nil { - log.Warn("failed to rollback roles after restore failed", zap.Error(err)) + log.Ctx(ctx).Warn("failed to rollback roles after restore failed", zap.Error(err)) } } @@ -1490,7 +1491,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp for _, grant := range needRollbackGrants { err = kc.AlterGrant(ctx, tenant, grant, milvuspb.OperatePrivilegeType_Revoke) if err != nil { - log.Warn("failed to rollback grants after restore failed", zap.Error(err)) + log.Ctx(ctx).Warn("failed to rollback grants after restore failed", zap.Error(err)) } } @@ -1498,7 +1499,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp // roll back user err = kc.DropCredential(ctx, user.User) if err != nil { - log.Warn("failed to rollback users after restore failed", zap.Error(err)) + log.Ctx(ctx).Warn("failed to rollback users after restore failed", zap.Error(err)) } } @@ -1506,7 +1507,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp for _, group := range 
needRollbackPrivilegeGroups { err = kc.DropPrivilegeGroup(ctx, group.GroupName) if err != nil { - log.Warn("failed to rollback privilege groups after restore failed", zap.Error(err)) + log.Ctx(ctx).Warn("failed to rollback privilege groups after restore failed", zap.Error(err)) } } } @@ -1520,7 +1521,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp existRoleMap := lo.SliceToMap(existRoles, func(entity *milvuspb.RoleResult) (string, struct{}) { return entity.GetRole().GetName(), struct{}{} }) for _, role := range meta.Roles { if _, ok := existRoleMap[role.GetName()]; ok { - log.Warn("failed to restore, role already exists", zap.String("role", role.GetName())) + log.Ctx(ctx).Warn("failed to restore, role already exists", zap.String("role", role.GetName())) err = errors.Newf("role [%s] already exists", role.GetName()) return err } @@ -1539,7 +1540,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp existPrivGroupMap := lo.SliceToMap(existPrivGroups, func(entity *milvuspb.PrivilegeGroupInfo) (string, struct{}) { return entity.GroupName, struct{}{} }) for _, group := range meta.PrivilegeGroups { if _, ok := existPrivGroupMap[group.GroupName]; ok { - log.Warn("failed to restore, privilege group already exists", zap.String("group", group.GroupName)) + log.Ctx(ctx).Warn("failed to restore, privilege group already exists", zap.String("group", group.GroupName)) err = errors.Newf("privilege group [%s] already exists", group.GroupName) return err } @@ -1563,7 +1564,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp } else if _, ok := existPrivGroupMap[privName]; ok { grant.Grantor.Privilege.Name = util.PrivilegeGroupNameForMetastore(privName) } else { - log.Warn("failed to restore, privilege group does not exist", zap.String("group", privName)) + log.Ctx(ctx).Warn("failed to restore, privilege group does not exist", zap.String("group", privName)) err = errors.Newf("privilege group [%s] does not exist", privName) return err } @@ -1582,7 +1583,7 @@ func (kc *Catalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvusp existUserMap := lo.SliceToMap(existUser, func(entity *milvuspb.UserResult) (string, struct{}) { return entity.GetUser().GetName(), struct{}{} }) for _, user := range meta.Users { if _, ok := existUserMap[user.GetUser()]; ok { - log.Info("failed to restore, user already exists", zap.String("user", user.GetUser())) + log.Ctx(ctx).Info("failed to restore, user already exists", zap.String("user", user.GetUser())) err = errors.Newf("user [%s] already exists", user.GetUser()) return err } @@ -1618,13 +1619,13 @@ func (kc *Catalog) GetPrivilegeGroup(ctx context.Context, groupName string) (*mi if errors.Is(err, merr.ErrIoKeyNotFound) { return nil, fmt.Errorf("privilege group [%s] does not exist", groupName) } - log.Error("failed to load privilege group", zap.String("group", groupName), zap.Error(err)) + log.Ctx(ctx).Error("failed to load privilege group", zap.String("group", groupName), zap.Error(err)) return nil, err } privGroupInfo := &milvuspb.PrivilegeGroupInfo{} err = proto.Unmarshal([]byte(val), privGroupInfo) if err != nil { - log.Error("failed to unmarshal privilege group info", zap.Error(err)) + log.Ctx(ctx).Error("failed to unmarshal privilege group info", zap.Error(err)) return nil, err } return privGroupInfo, nil @@ -1634,7 +1635,7 @@ func (kc *Catalog) DropPrivilegeGroup(ctx context.Context, groupName string) err k := BuildPrivilegeGroupkey(groupName) err := 
kc.Txn.Remove(k) if err != nil { - log.Warn("fail to drop privilege group", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to drop privilege group", zap.String("key", k), zap.Error(err)) return err } return nil @@ -1648,11 +1649,11 @@ func (kc *Catalog) SavePrivilegeGroup(ctx context.Context, data *milvuspb.Privil } v, err := proto.Marshal(groupInfo) if err != nil { - log.Error("failed to marshal privilege group info", zap.Error(err)) + log.Ctx(ctx).Error("failed to marshal privilege group info", zap.Error(err)) return err } if err = kc.Txn.Save(k, string(v)); err != nil { - log.Warn("fail to put privilege group", zap.String("key", k), zap.Error(err)) + log.Ctx(ctx).Warn("fail to put privilege group", zap.String("key", k), zap.Error(err)) return err } return nil @@ -1661,7 +1662,7 @@ func (kc *Catalog) SavePrivilegeGroup(ctx context.Context, data *milvuspb.Privil func (kc *Catalog) ListPrivilegeGroups(ctx context.Context) ([]*milvuspb.PrivilegeGroupInfo, error) { _, vals, err := kc.Txn.LoadWithPrefix(PrivilegeGroupPrefix) if err != nil { - log.Error("failed to list privilege groups", zap.String("prefix", PrivilegeGroupPrefix), zap.Error(err)) + log.Ctx(ctx).Error("failed to list privilege groups", zap.String("prefix", PrivilegeGroupPrefix), zap.Error(err)) return nil, err } privGroups := make([]*milvuspb.PrivilegeGroupInfo, 0, len(vals)) @@ -1669,7 +1670,7 @@ func (kc *Catalog) ListPrivilegeGroups(ctx context.Context) ([]*milvuspb.Privile privGroupInfo := &milvuspb.PrivilegeGroupInfo{} err = proto.Unmarshal([]byte(val), privGroupInfo) if err != nil { - log.Error("failed to unmarshal privilege group info", zap.Error(err)) + log.Ctx(ctx).Error("failed to unmarshal privilege group info", zap.Error(err)) return nil, err } privGroups = append(privGroups, privGroupInfo) diff --git a/internal/metastore/kv/rootcoord/kv_catalog_test.go b/internal/metastore/kv/rootcoord/kv_catalog_test.go index c0dcc6368b880..71b0c96624f3b 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog_test.go +++ b/internal/metastore/kv/rootcoord/kv_catalog_test.go @@ -1657,7 +1657,7 @@ func TestRBAC_Role(t *testing.T) { } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - err := c.remove(test.key) + err := c.remove(ctx, test.key) if test.isValid { assert.NoError(t, err) } else { @@ -1703,7 +1703,7 @@ func TestRBAC_Role(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - err := c.save(test.key) + err := c.save(ctx, test.key) if test.isValid { assert.NoError(t, err) } else { @@ -2074,7 +2074,7 @@ func TestRBAC_Role(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - res, err := c.getUserResult(tenant, test.user, test.includeRoleInfo) + res, err := c.getUserResult(ctx, tenant, test.user, test.includeRoleInfo) assert.Equal(t, test.user, res.GetUser().GetName()) diff --git a/internal/proxy/impl.go b/internal/proxy/impl.go index 3a1ab22c1046e..0b971f6b730c8 100644 --- a/internal/proxy/impl.go +++ b/internal/proxy/impl.go @@ -78,7 +78,7 @@ func (node *Proxy) GetComponentStates(ctx context.Context, req *milvuspb.GetComp Status: merr.Success(), } code := node.GetStateCode() - log.Debug("Proxy current state", zap.String("StateCode", code.String())) + log.Ctx(ctx).Debug("Proxy current state", zap.String("StateCode", code.String())) nodeID := common.NotRegisteredID if node.session != nil && node.session.Registered() { nodeID = node.session.ServerID @@ -250,8 +250,7 @@ func (node *Proxy) CreateDatabase(ctx 
context.Context, request *milvuspb.CreateD replicateMsgStream: node.replicateMsgStream, } - log := log.With( - zap.String("traceID", sp.SpanContext().TraceID().String()), + log := log.Ctx(ctx).With( zap.String("role", typeutil.ProxyRole), zap.String("dbName", request.DbName), ) @@ -319,8 +318,7 @@ func (node *Proxy) DropDatabase(ctx context.Context, request *milvuspb.DropDatab replicateMsgStream: node.replicateMsgStream, } - log := log.With( - zap.String("traceID", sp.SpanContext().TraceID().String()), + log := log.Ctx(ctx).With( zap.String("role", typeutil.ProxyRole), zap.String("dbName", request.DbName), ) @@ -387,8 +385,7 @@ func (node *Proxy) ListDatabases(ctx context.Context, request *milvuspb.ListData rootCoord: node.rootCoord, } - log := log.With( - zap.String("traceID", sp.SpanContext().TraceID().String()), + log := log.Ctx(ctx).With( zap.String("role", typeutil.ProxyRole), ) @@ -3681,7 +3678,7 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (* return res, err } - log.Debug(rpcDone(method)) + log.Ctx(ctx).Debug(rpcDone(method)) metrics.ProxyFunctionCall.WithLabelValues( strconv.FormatInt(paramtable.GetNodeID(), 10), @@ -5892,14 +5889,14 @@ func (node *Proxy) TransferNode(ctx context.Context, request *milvuspb.TransferN method := "TransferNode" if err := ValidateResourceGroupName(request.GetSourceResourceGroup()); err != nil { - log.Warn("TransferNode failed", + log.Ctx(ctx).Warn("TransferNode failed", zap.Error(err), ) return getErrResponse(err, method, "", ""), nil } if err := ValidateResourceGroupName(request.GetTargetResourceGroup()); err != nil { - log.Warn("TransferNode failed", + log.Ctx(ctx).Warn("TransferNode failed", zap.Error(err), ) return getErrResponse(err, method, "", ""), nil @@ -5959,14 +5956,14 @@ func (node *Proxy) TransferReplica(ctx context.Context, request *milvuspb.Transf method := "TransferReplica" if err := ValidateResourceGroupName(request.GetSourceResourceGroup()); err != nil { - log.Warn("TransferReplica failed", + log.Ctx(ctx).Warn("TransferReplica failed", zap.Error(err), ) return getErrResponse(err, method, request.GetDbName(), request.GetCollectionName()), nil } if err := ValidateResourceGroupName(request.GetTargetResourceGroup()); err != nil { - log.Warn("TransferReplica failed", + log.Ctx(ctx).Warn("TransferReplica failed", zap.Error(err), ) return getErrResponse(err, method, request.GetDbName(), request.GetCollectionName()), nil @@ -6342,6 +6339,9 @@ func (node *Proxy) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimest return &milvuspb.AllocTimestampResponse{Status: merr.Status(err)}, nil } + log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), + ) log.Info("AllocTimestamp request receive") ts, err := node.tsoAllocator.AllocOne(ctx) if err != nil { @@ -6371,10 +6371,10 @@ func (node *Proxy) ImportV2(ctx context.Context, req *internalpb.ImportRequest) return &internalpb.ImportResponse{Status: merr.Status(err)}, nil } log := log.Ctx(ctx).With( + zap.String("role", typeutil.ProxyRole), zap.String("collectionName", req.GetCollectionName()), zap.String("partition name", req.GetPartitionName()), zap.Any("files", req.GetFiles()), - zap.String("role", typeutil.ProxyRole), zap.Any("options", req.GetOptions()), ) diff --git a/internal/proxy/msg_pack.go b/internal/proxy/msg_pack.go index 96fe24eb5ead3..fb079a08d8b0d 100644 --- a/internal/proxy/msg_pack.go +++ b/internal/proxy/msg_pack.go @@ -129,7 +129,7 @@ func repackInsertDataByPartition(ctx context.Context, assignedSegmentInfos, err := 
segIDAssigner.GetSegmentID(insertMsg.CollectionID, partitionID, channelName, uint32(len(rowOffsets)), maxTs) metrics.ProxyAssignSegmentIDLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(time.Since(beforeAssign).Milliseconds())) if err != nil { - log.Error("allocate segmentID for insert data failed", + log.Ctx(ctx).Error("allocate segmentID for insert data failed", zap.String("collectionName", insertMsg.CollectionName), zap.String("channelName", channelName), zap.Int("allocate count", len(rowOffsets)), @@ -142,7 +142,7 @@ func repackInsertDataByPartition(ctx context.Context, subRowOffsets := rowOffsets[startPos : startPos+int(count)] msgs, err := genInsertMsgsByPartition(ctx, segmentID, partitionID, partitionName, subRowOffsets, channelName, insertMsg) if err != nil { - log.Warn("repack insert data to insert msgs failed", + log.Ctx(ctx).Warn("repack insert data to insert msgs failed", zap.String("collectionName", insertMsg.CollectionName), zap.Int64("partitionID", partitionID), zap.Error(err)) @@ -167,7 +167,7 @@ func setMsgID(ctx context.Context, return err }) if err != nil { - log.Error("failed to allocate msg id", zap.Error(err)) + log.Ctx(ctx).Error("failed to allocate msg id", zap.Error(err)) return err } @@ -195,7 +195,7 @@ func repackInsertData(ctx context.Context, partitionName := insertMsg.PartitionName msgs, err := repackInsertDataByPartition(ctx, partitionName, rowOffsets, channel, insertMsg, segIDAssigner) if err != nil { - log.Warn("repack insert data to msg pack failed", + log.Ctx(ctx).Warn("repack insert data to msg pack failed", zap.String("collectionName", insertMsg.CollectionName), zap.String("partition name", partitionName), zap.Error(err)) @@ -207,7 +207,7 @@ func repackInsertData(ctx context.Context, err := setMsgID(ctx, msgPack.Msgs, idAllocator) if err != nil { - log.Error("failed to set msgID when repack insert data", + log.Ctx(ctx).Error("failed to set msgID when repack insert data", zap.String("collectionName", insertMsg.CollectionName), zap.String("partition name", insertMsg.PartitionName), zap.Error(err)) @@ -233,14 +233,14 @@ func repackInsertDataWithPartitionKey(ctx context.Context, channel2RowOffsets := assignChannelsByPK(result.IDs, channelNames, insertMsg) partitionNames, err := getDefaultPartitionsInPartitionKeyMode(ctx, insertMsg.GetDbName(), insertMsg.CollectionName) if err != nil { - log.Warn("get default partition names failed in partition key mode", + log.Ctx(ctx).Warn("get default partition names failed in partition key mode", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err)) return nil, err } hashValues, err := typeutil.HashKey2Partitions(partitionKeys, partitionNames) if err != nil { - log.Warn("has partition keys to partitions failed", + log.Ctx(ctx).Warn("has partition keys to partitions failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err)) return nil, err @@ -274,7 +274,7 @@ func repackInsertDataWithPartitionKey(ctx context.Context, err = errGroup.Wait() if err != nil { - log.Warn("repack insert data into insert msg pack failed", + log.Ctx(ctx).Warn("repack insert data into insert msg pack failed", zap.String("collectionName", insertMsg.CollectionName), zap.String("channelName", channel), zap.Error(err)) @@ -289,7 +289,7 @@ func repackInsertDataWithPartitionKey(ctx context.Context, err = setMsgID(ctx, msgPack.Msgs, idAllocator) if err != nil { - log.Error("failed to set msgID when repack insert data", + log.Ctx(ctx).Error("failed to set msgID when repack insert 
data", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err)) return nil, err diff --git a/internal/proxy/search_reduce_util.go b/internal/proxy/search_reduce_util.go index 15029f1e01256..d398a2dd24689 100644 --- a/internal/proxy/search_reduce_util.go +++ b/internal/proxy/search_reduce_util.go @@ -237,7 +237,7 @@ func reduceSearchResultDataWithGroupBy(ctx context.Context, subSearchResultData ) for j = 0; j < groupBound; { - subSearchIdx, resultDataIdx := selectHighestScoreIndex(subSearchResultData, subSearchNqOffset, cursors, i) + subSearchIdx, resultDataIdx := selectHighestScoreIndex(ctx, subSearchResultData, subSearchNqOffset, cursors, i) if subSearchIdx == -1 { break } @@ -382,7 +382,7 @@ func reduceSearchResultDataNoGroupBy(ctx context.Context, subSearchResultData [] // skip offset results for k := int64(0); k < offset; k++ { - subSearchIdx, _ := selectHighestScoreIndex(subSearchResultData, subSearchNqOffset, cursors, i) + subSearchIdx, _ := selectHighestScoreIndex(ctx, subSearchResultData, subSearchNqOffset, cursors, i) if subSearchIdx == -1 { break } @@ -395,7 +395,7 @@ func reduceSearchResultDataNoGroupBy(ctx context.Context, subSearchResultData [] // From all the sub-query result sets of the i-th query vector, // find the sub-query result set index of the score j-th data, // and the index of the data in schemapb.SearchResultData - subSearchIdx, resultDataIdx := selectHighestScoreIndex(subSearchResultData, subSearchNqOffset, cursors, i) + subSearchIdx, resultDataIdx := selectHighestScoreIndex(ctx, subSearchResultData, subSearchNqOffset, cursors, i) if subSearchIdx == -1 { break } diff --git a/internal/proxy/task.go b/internal/proxy/task.go index 07b6cf6f864e1..c81aa47f969e9 100644 --- a/internal/proxy/task.go +++ b/internal/proxy/task.go @@ -208,7 +208,7 @@ func (t *createCollectionTask) OnEnqueue() error { return nil } -func (t *createCollectionTask) validatePartitionKey() error { +func (t *createCollectionTask) validatePartitionKey(ctx context.Context) error { idx := -1 for i, field := range t.schema.Fields { if field.GetIsPartitionKey() { @@ -263,7 +263,7 @@ func (t *createCollectionTask) validatePartitionKey() error { return fmt.Errorf("num_partitions should only be specified with partition key field enabled") } } else { - log.Info("create collection with partition key mode", + log.Ctx(ctx).Info("create collection with partition key mode", zap.String("collectionName", t.CollectionName), zap.Int64("numDefaultPartitions", t.GetNumPartitions())) } @@ -271,7 +271,7 @@ func (t *createCollectionTask) validatePartitionKey() error { return nil } -func (t *createCollectionTask) validateClusteringKey() error { +func (t *createCollectionTask) validateClusteringKey(ctx context.Context) error { idx := -1 for i, field := range t.schema.Fields { if field.GetIsClusteringKey() { @@ -288,7 +288,7 @@ func (t *createCollectionTask) validateClusteringKey() error { } if idx != -1 { - log.Info("create collection with clustering key", + log.Ctx(ctx).Info("create collection with clustering key", zap.String("collectionName", t.CollectionName), zap.String("clusteringKeyField", t.schema.Fields[idx].Name)) } @@ -358,17 +358,17 @@ func (t *createCollectionTask) PreExecute(ctx context.Context) error { } // validate partition key mode - if err := t.validatePartitionKey(); err != nil { + if err := t.validatePartitionKey(ctx); err != nil { return err } hasPartitionKey := hasParitionKeyModeField(t.schema) - if _, err := validatePartitionKeyIsolation(t.CollectionName, hasPartitionKey, 
t.GetProperties()...); err != nil { + if _, err := validatePartitionKeyIsolation(ctx, t.CollectionName, hasPartitionKey, t.GetProperties()...); err != nil { return err } // validate clustering key - if err := t.validateClusteringKey(); err != nil { + if err := t.validateClusteringKey(ctx); err != nil { return err } @@ -808,7 +808,7 @@ func (t *showCollectionsTask) Execute(ctx context.Context) error { for _, collectionName := range t.CollectionNames { collectionID, err := globalMetaCache.GetCollectionID(ctx, t.GetDbName(), collectionName) if err != nil { - log.Debug("Failed to get collection id.", zap.String("collectionName", collectionName), + log.Ctx(ctx).Debug("Failed to get collection id.", zap.String("collectionName", collectionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showCollections")) return err } @@ -854,14 +854,14 @@ func (t *showCollectionsTask) Execute(ctx context.Context) error { for offset, id := range resp.CollectionIDs { collectionName, ok := IDs2Names[id] if !ok { - log.Debug("Failed to get collection info. This collection may be not released", + log.Ctx(ctx).Debug("Failed to get collection info. This collection may be not released", zap.Int64("collectionID", id), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showCollections")) continue } collectionInfo, err := globalMetaCache.GetCollectionInfo(ctx, t.GetDbName(), collectionName, id) if err != nil { - log.Debug("Failed to get collection info.", zap.String("collectionName", collectionName), + log.Ctx(ctx).Debug("Failed to get collection info.", zap.String("collectionName", collectionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showCollections")) return err } @@ -953,7 +953,7 @@ func hasLazyLoadProp(props ...*commonpb.KeyValuePair) bool { return false } -func validatePartitionKeyIsolation(colName string, isPartitionKeyEnabled bool, props ...*commonpb.KeyValuePair) (bool, error) { +func validatePartitionKeyIsolation(ctx context.Context, colName string, isPartitionKeyEnabled bool, props ...*commonpb.KeyValuePair) (bool, error) { iso, err := common.IsPartitionKeyIsolationKvEnabled(props...) if err != nil { return false, err @@ -974,7 +974,7 @@ func validatePartitionKeyIsolation(colName string, isPartitionKeyEnabled bool, p "partition key isolation mode is enabled but current Milvus does not support it. Please contact us") } - log.Info("validated with partition key isolation", zap.String("collectionName", colName)) + log.Ctx(ctx).Info("validated with partition key isolation", zap.String("collectionName", colName)) return true, nil } @@ -1001,7 +1001,7 @@ func (t *alterCollectionTask) PreExecute(ctx context.Context) error { return err } // check if the new partition key isolation is valid to use - newIsoValue, err := validatePartitionKeyIsolation(t.CollectionName, isPartitionKeyMode, t.Properties...) + newIsoValue, err := validatePartitionKeyIsolation(ctx, t.CollectionName, isPartitionKeyMode, t.Properties...) 
if err != nil { return err } @@ -1011,7 +1011,7 @@ func (t *alterCollectionTask) PreExecute(ctx context.Context) error { } oldIsoValue := collBasicInfo.partitionKeyIsolation - log.Info("alter collection pre check with partition key isolation", + log.Ctx(ctx).Info("alter collection pre check with partition key isolation", zap.String("collectionName", t.CollectionName), zap.Bool("isPartitionKeyMode", isPartitionKeyMode), zap.Bool("newIsoValue", newIsoValue), @@ -1403,7 +1403,7 @@ func (t *showPartitionsTask) Execute(ctx context.Context) error { collectionName := t.CollectionName collectionID, err := globalMetaCache.GetCollectionID(ctx, t.GetDbName(), collectionName) if err != nil { - log.Debug("Failed to get collection id.", zap.String("collectionName", collectionName), + log.Ctx(ctx).Debug("Failed to get collection id.", zap.String("collectionName", collectionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showPartitions")) return err } @@ -1416,7 +1416,7 @@ func (t *showPartitionsTask) Execute(ctx context.Context) error { for _, partitionName := range t.PartitionNames { partitionID, err := globalMetaCache.GetPartitionID(ctx, t.GetDbName(), collectionName, partitionName) if err != nil { - log.Debug("Failed to get partition id.", zap.String("partitionName", partitionName), + log.Ctx(ctx).Debug("Failed to get partition id.", zap.String("partitionName", partitionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showPartitions")) return err } @@ -1447,13 +1447,13 @@ func (t *showPartitionsTask) Execute(ctx context.Context) error { for offset, id := range resp.PartitionIDs { partitionName, ok := IDs2Names[id] if !ok { - log.Debug("Failed to get partition id.", zap.String("partitionName", partitionName), + log.Ctx(ctx).Debug("Failed to get partition id.", zap.String("partitionName", partitionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showPartitions")) return errors.New("failed to show partitions") } partitionInfo, err := globalMetaCache.GetPartitionInfo(ctx, t.GetDbName(), collectionName, partitionName) if err != nil { - log.Debug("Failed to get partition id.", zap.String("partitionName", partitionName), + log.Ctx(ctx).Debug("Failed to get partition id.", zap.String("partitionName", partitionName), zap.Int64("requestID", t.Base.MsgID), zap.String("requestType", "showPartitions")) return err } @@ -1847,7 +1847,7 @@ func (t *loadPartitionsTask) Execute(ctx context.Context) error { if len(unindexedVecFields) != 0 { errMsg := fmt.Sprintf("there is no vector index on field: %v, please create index firstly", unindexedVecFields) - log.Debug(errMsg) + log.Ctx(ctx).Debug(errMsg) return errors.New(errMsg) } @@ -2258,7 +2258,7 @@ func (t *DescribeResourceGroupTask) Execute(ctx context.Context) error { for key, value := range collections { name, err := globalMetaCache.GetCollectionName(ctx, "", key) if err != nil { - log.Warn("failed to get collection name", + log.Ctx(ctx).Warn("failed to get collection name", zap.Int64("collectionID", key), zap.Error(err)) diff --git a/internal/proxy/task_database.go b/internal/proxy/task_database.go index 5ae997aab2d87..b44678eca7e14 100644 --- a/internal/proxy/task_database.go +++ b/internal/proxy/task_database.go @@ -363,12 +363,12 @@ func (t *describeDatabaseTask) Execute(ctx context.Context) error { } ret, err := t.rootCoord.DescribeDatabase(ctx, req) if err != nil { - log.Warn("DescribeDatabase failed", zap.Error(err)) + log.Ctx(ctx).Warn("DescribeDatabase failed", zap.Error(err)) return err } 
if err := merr.CheckRPCCall(ret, err); err != nil { - log.Warn("DescribeDatabase failed", zap.Error(err)) + log.Ctx(ctx).Warn("DescribeDatabase failed", zap.Error(err)) return err } diff --git a/internal/proxy/task_delete.go b/internal/proxy/task_delete.go index 1d9df1c59f8f6..13c3ad1b387c5 100644 --- a/internal/proxy/task_delete.go +++ b/internal/proxy/task_delete.go @@ -171,7 +171,7 @@ func (dt *deleteTask) Execute(ctx context.Context) (err error) { } } - log.Debug("send delete request to virtual channels", + log.Ctx(ctx).Debug("send delete request to virtual channels", zap.String("collectionName", dt.req.GetCollectionName()), zap.Int64("collectionID", dt.collectionID), zap.Strings("virtual_channels", dt.vChannels), @@ -393,7 +393,7 @@ func (dr *deleteRunner) Run(ctx context.Context) error { // need query from querynode before delete err = dr.complexDelete(ctx, plan) if err != nil { - log.Warn("complex delete failed,but delete some data", zap.Int64("count", dr.result.DeleteCnt), zap.String("expr", dr.req.GetExpr())) + log.Ctx(ctx).Warn("complex delete failed,but delete some data", zap.Int64("count", dr.result.DeleteCnt), zap.String("expr", dr.req.GetExpr())) return err } } @@ -421,7 +421,7 @@ func (dr *deleteRunner) produce(ctx context.Context, primaryKeys *schemapb.IDs) } if err := dr.queue.Enqueue(enqueuedTask); err != nil { - log.Error("Failed to enqueue delete task: " + err.Error()) + log.Ctx(ctx).Error("Failed to enqueue delete task: " + err.Error()) return nil, err } @@ -535,7 +535,7 @@ func (dr *deleteRunner) receiveQueryResult(ctx context.Context, client querypb.Q result, err := client.Recv() if err != nil { if err == io.EOF { - log.Debug("query stream for delete finished", zap.Int64("msgID", dr.msgID)) + log.Ctx(ctx).Debug("query stream for delete finished", zap.Int64("msgID", dr.msgID)) return nil } return err @@ -543,21 +543,21 @@ func (dr *deleteRunner) receiveQueryResult(ctx context.Context, client querypb.Q err = merr.Error(result.GetStatus()) if err != nil { - log.Warn("query stream for delete get error status", zap.Int64("msgID", dr.msgID), zap.Error(err)) + log.Ctx(ctx).Warn("query stream for delete get error status", zap.Int64("msgID", dr.msgID), zap.Error(err)) return err } if dr.limiter != nil { err := dr.limiter.Alloc(ctx, dr.dbID, map[int64][]int64{dr.collectionID: partitionIDs}, internalpb.RateType_DMLDelete, proto.Size(result.GetIds())) if err != nil { - log.Warn("query stream for delete failed because rate limiter", zap.Int64("msgID", dr.msgID), zap.Error(err)) + log.Ctx(ctx).Warn("query stream for delete failed because rate limiter", zap.Int64("msgID", dr.msgID), zap.Error(err)) return err } } task, err := dr.produce(ctx, result.GetIds()) if err != nil { - log.Warn("produce delete task failed", zap.Error(err)) + log.Ctx(ctx).Warn("produce delete task failed", zap.Error(err)) return err } task.allQueryCnt = result.GetAllRetrieveCount() @@ -590,26 +590,26 @@ func (dr *deleteRunner) complexDelete(ctx context.Context, plan *planpb.PlanNode dr.result.DeleteCnt = dr.count.Load() dr.result.Timestamp = dr.sessionTS.Load() if err != nil { - log.Warn("fail to execute complex delete", + log.Ctx(ctx).Warn("fail to execute complex delete", zap.Int64("deleteCnt", dr.result.GetDeleteCnt()), zap.Duration("interval", rc.ElapseSpan()), zap.Error(err)) return err } - log.Info("complex delete finished", zap.Int64("deleteCnt", dr.result.GetDeleteCnt()), zap.Duration("interval", rc.ElapseSpan())) + log.Ctx(ctx).Info("complex delete finished", zap.Int64("deleteCnt", 
dr.result.GetDeleteCnt()), zap.Duration("interval", rc.ElapseSpan())) return nil } func (dr *deleteRunner) simpleDelete(ctx context.Context, pk *schemapb.IDs, numRow int64) error { - log.Debug("get primary keys from expr", + log.Ctx(ctx).Debug("get primary keys from expr", zap.Int64("len of primary keys", numRow), zap.Int64("collectionID", dr.collectionID), zap.Int64("partitionID", dr.partitionID)) task, err := dr.produce(ctx, pk) if err != nil { - log.Warn("produce delete task failed") + log.Ctx(ctx).Warn("produce delete task failed") return err } diff --git a/internal/proxy/task_delete_streaming.go b/internal/proxy/task_delete_streaming.go index 25facf4d71c43..a46a02e64896b 100644 --- a/internal/proxy/task_delete_streaming.go +++ b/internal/proxy/task_delete_streaming.go @@ -60,7 +60,7 @@ func (dt *deleteTaskByStreamingService) Execute(ctx context.Context) (err error) } } - log.Debug("send delete request to virtual channels", + log.Ctx(ctx).Debug("send delete request to virtual channels", zap.String("collectionName", dt.req.GetCollectionName()), zap.Int64("collectionID", dt.collectionID), zap.Strings("virtual_channels", dt.vChannels), @@ -69,7 +69,7 @@ func (dt *deleteTaskByStreamingService) Execute(ctx context.Context) (err error) resp := streaming.WAL().AppendMessages(ctx, msgs...) if resp.UnwrapFirstError(); err != nil { - log.Warn("append messages to wal failed", zap.Error(err)) + log.Ctx(ctx).Warn("append messages to wal failed", zap.Error(err)) return err } dt.sessionTS = resp.MaxTimeTick() diff --git a/internal/proxy/task_flush_streaming.go b/internal/proxy/task_flush_streaming.go index e0cc8625adeb1..8f122ee2e4fc9 100644 --- a/internal/proxy/task_flush_streaming.go +++ b/internal/proxy/task_flush_streaming.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - "github.com/pingcap/log" "go.uber.org/zap" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" @@ -29,6 +28,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus/internal/distributed/streaming" "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/streaming/util/message" "github.com/milvus-io/milvus/pkg/util/commonpbutil" "github.com/milvus-io/milvus/pkg/util/merr" @@ -48,7 +48,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error { channelCps := make(map[string]*msgpb.MsgPosition) flushTs := t.BeginTs() - log.Info("flushTaskByStreamingService.Execute", zap.Int("collectionNum", len(t.CollectionNames)), zap.Uint64("flushTs", flushTs)) + log.Ctx(ctx).Info("flushTaskByStreamingService.Execute", zap.Int("collectionNum", len(t.CollectionNames)), zap.Uint64("flushTs", flushTs)) timeOfSeal, _ := tsoutil.ParseTS(flushTs) for _, collName := range t.CollectionNames { collID, err := globalMetaCache.GetCollectionID(t.ctx, t.DbName, collName) @@ -115,7 +115,7 @@ func (t *flushTaskByStreamingService) Execute(ctx context.Context) error { // sendManualFlushToWAL sends a manual flush message to WAL. func (t *flushTaskByStreamingService) sendManualFlushToWAL(ctx context.Context, collID int64, vchannel string, flushTs uint64) ([]int64, error) { - logger := log.With(zap.Int64("collectionID", collID), zap.String("vchannel", vchannel)) + logger := log.Ctx(ctx).With(zap.Int64("collectionID", collID), zap.String("vchannel", vchannel)) flushMsg, err := message.NewManualFlushMessageBuilderV2(). WithVChannel(vchannel). 
WithHeader(&message.ManualFlushMessageHeader{ diff --git a/internal/proxy/task_index.go b/internal/proxy/task_index.go index 914b752807980..57777814b8e89 100644 --- a/internal/proxy/task_index.go +++ b/internal/proxy/task_index.go @@ -167,7 +167,7 @@ func (cit *createIndexTask) parseFunctionParamsToIndex(indexParamsMap map[string return nil } -func (cit *createIndexTask) parseIndexParams() error { +func (cit *createIndexTask) parseIndexParams(ctx context.Context) error { cit.newExtraParams = cit.req.GetExtraParams() isVecIndex := typeutil.IsVectorType(cit.fieldSchema.DataType) @@ -194,13 +194,13 @@ func (cit *createIndexTask) parseIndexParams() error { specifyIndexType, exist := indexParamsMap[common.IndexTypeKey] if exist && specifyIndexType != "" { if err := indexparamcheck.ValidateMmapIndexParams(specifyIndexType, indexParamsMap); err != nil { - log.Ctx(cit.ctx).Warn("Invalid mmap type params", zap.String(common.IndexTypeKey, specifyIndexType), zap.Error(err)) + log.Ctx(ctx).Warn("Invalid mmap type params", zap.String(common.IndexTypeKey, specifyIndexType), zap.Error(err)) return merr.WrapErrParameterInvalidMsg("invalid mmap type params", err.Error()) } checker, err := indexparamcheck.GetIndexCheckerMgrInstance().GetChecker(specifyIndexType) // not enable hybrid index for user, used in milvus internally if err != nil || indexparamcheck.IsHYBRIDChecker(checker) { - log.Ctx(cit.ctx).Warn("Failed to get index checker", zap.String(common.IndexTypeKey, specifyIndexType)) + log.Ctx(ctx).Warn("Failed to get index checker", zap.String(common.IndexTypeKey, specifyIndexType)) return merr.WrapErrParameterInvalid("valid index", fmt.Sprintf("invalid index type: %s", specifyIndexType)) } } @@ -240,7 +240,7 @@ func (cit *createIndexTask) parseIndexParams() error { } else { specifyIndexType, exist := indexParamsMap[common.IndexTypeKey] if Params.AutoIndexConfig.Enable.GetAsBool() { // `enable` only for cloud instance. - log.Info("create index trigger AutoIndex", + log.Ctx(ctx).Info("create index trigger AutoIndex", zap.String("original type", specifyIndexType), zap.String("final type", Params.AutoIndexConfig.AutoIndexTypeName.GetValue())) @@ -275,7 +275,7 @@ func (cit *createIndexTask) parseIndexParams() error { indexParamsMap[k] = v fields = append(fields, zap.String(k, v)) } - log.Ctx(cit.ctx).Info("AutoIndex triggered", fields...) + log.Ctx(ctx).Info("AutoIndex triggered", fields...) 
} handle := func(numberParams int, autoIndexConfig map[string]string) error { @@ -378,7 +378,7 @@ func (cit *createIndexTask) parseIndexParams() error { } } - err := checkTrain(cit.fieldSchema, indexParamsMap) + err := checkTrain(ctx, cit.fieldSchema, indexParamsMap) if err != nil { return merr.WrapErrParameterInvalid("valid index params", "invalid index params", err.Error()) } @@ -411,20 +411,20 @@ func (cit *createIndexTask) parseIndexParams() error { func (cit *createIndexTask) getIndexedFieldAndFunction(ctx context.Context) error { schema, err := globalMetaCache.GetCollectionSchema(ctx, cit.req.GetDbName(), cit.req.GetCollectionName()) if err != nil { - log.Error("failed to get collection schema", zap.Error(err)) + log.Ctx(ctx).Error("failed to get collection schema", zap.Error(err)) return fmt.Errorf("failed to get collection schema: %s", err) } field, err := schema.schemaHelper.GetFieldFromName(cit.req.GetFieldName()) if err != nil { - log.Error("create index on non-exist field", zap.Error(err)) + log.Ctx(ctx).Error("create index on non-exist field", zap.Error(err)) return fmt.Errorf("cannot create index on non-exist field: %s", cit.req.GetFieldName()) } if field.IsFunctionOutput { function, err := schema.schemaHelper.GetFunctionByOutputField(field) if err != nil { - log.Error("create index failed, cannot find function of function output field", zap.Error(err)) + log.Ctx(ctx).Error("create index failed, cannot find function of function output field", zap.Error(err)) return fmt.Errorf("create index failed, cannot find function of function output field: %s", cit.req.GetFieldName()) } cit.functionSchema = function @@ -455,7 +455,7 @@ func fillDimension(field *schemapb.FieldSchema, indexParams map[string]string) e return nil } -func checkTrain(field *schemapb.FieldSchema, indexParams map[string]string) error { +func checkTrain(ctx context.Context, field *schemapb.FieldSchema, indexParams map[string]string) error { indexType := indexParams[common.IndexTypeKey] if indexType == indexparamcheck.IndexHybrid { @@ -466,7 +466,7 @@ func checkTrain(field *schemapb.FieldSchema, indexParams map[string]string) erro } checker, err := indexparamcheck.GetIndexCheckerMgrInstance().GetChecker(indexType) if err != nil { - log.Warn("Failed to get index checker", zap.String(common.IndexTypeKey, indexType)) + log.Ctx(ctx).Warn("Failed to get index checker", zap.String(common.IndexTypeKey, indexType)) return fmt.Errorf("invalid index type: %s", indexType) } @@ -486,12 +486,12 @@ func checkTrain(field *schemapb.FieldSchema, indexParams map[string]string) erro } if err := checker.CheckValidDataType(indexType, field); err != nil { - log.Info("create index with invalid data type", zap.Error(err), zap.String("data_type", field.GetDataType().String())) + log.Ctx(ctx).Info("create index with invalid data type", zap.Error(err), zap.String("data_type", field.GetDataType().String())) return err } if err := checker.CheckTrain(field.DataType, indexParams); err != nil { - log.Info("create index with invalid parameters", zap.Error(err)) + log.Ctx(ctx).Info("create index with invalid parameters", zap.Error(err)) return err } @@ -517,7 +517,7 @@ func (cit *createIndexTask) PreExecute(ctx context.Context) error { } // check index param, not accurate, only some static rules - err = cit.parseIndexParams() + err = cit.parseIndexParams(ctx) if err != nil { return err } @@ -744,7 +744,7 @@ func (dit *describeIndexTask) PreExecute(ctx context.Context) error { func (dit *describeIndexTask) Execute(ctx context.Context) error { 
schema, err := globalMetaCache.GetCollectionSchema(ctx, dit.GetDbName(), dit.GetCollectionName()) if err != nil { - log.Error("failed to get collection schema", zap.Error(err)) + log.Ctx(ctx).Error("failed to get collection schema", zap.Error(err)) return fmt.Errorf("failed to get collection schema: %s", err) } @@ -766,7 +766,7 @@ func (dit *describeIndexTask) Execute(ctx context.Context) error { for _, indexInfo := range resp.IndexInfos { field, err := schema.schemaHelper.GetFieldFromID(indexInfo.FieldID) if err != nil { - log.Error("failed to get collection field", zap.Error(err)) + log.Ctx(ctx).Error("failed to get collection field", zap.Error(err)) return fmt.Errorf("failed to get collection field: %d", indexInfo.FieldID) } params := indexInfo.GetUserIndexParams() @@ -863,7 +863,7 @@ func (dit *getIndexStatisticsTask) PreExecute(ctx context.Context) error { func (dit *getIndexStatisticsTask) Execute(ctx context.Context) error { schema, err := globalMetaCache.GetCollectionSchema(ctx, dit.GetDbName(), dit.GetCollectionName()) if err != nil { - log.Error("failed to get collection schema", zap.String("collection_name", dit.GetCollectionName()), zap.Error(err)) + log.Ctx(ctx).Error("failed to get collection schema", zap.String("collection_name", dit.GetCollectionName()), zap.Error(err)) return fmt.Errorf("failed to get collection schema: %s", dit.GetCollectionName()) } schemaHelper := schema.schemaHelper @@ -879,7 +879,7 @@ func (dit *getIndexStatisticsTask) Execute(ctx context.Context) error { for _, indexInfo := range resp.IndexInfos { field, err := schemaHelper.GetFieldFromID(indexInfo.FieldID) if err != nil { - log.Error("failed to get collection field", zap.Int64("field_id", indexInfo.FieldID), zap.Error(err)) + log.Ctx(ctx).Error("failed to get collection field", zap.Int64("field_id", indexInfo.FieldID), zap.Error(err)) return fmt.Errorf("failed to get collection field: %d", indexInfo.FieldID) } params := indexInfo.GetUserIndexParams() diff --git a/internal/proxy/task_index_test.go b/internal/proxy/task_index_test.go index 554856e9871f6..780dea5041f4f 100644 --- a/internal/proxy/task_index_test.go +++ b/internal/proxy/task_index_test.go @@ -374,7 +374,7 @@ func Test_sparse_parseIndexParams(t *testing.T) { } t.Run("parse index params", func(t *testing.T) { - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.ElementsMatch(t, @@ -452,7 +452,7 @@ func Test_parseIndexParams(t *testing.T) { } t.Run("parse index params", func(t *testing.T) { - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.ElementsMatch(t, @@ -545,7 +545,7 @@ func Test_parseIndexParams(t *testing.T) { indexParamsStr, err := json.Marshal(indexParams) assert.NoError(t, err) Params.Save(Params.AutoIndexConfig.IndexParams.Key, string(indexParamsStr)) - err = cit2.parseIndexParams() + err = cit2.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.ElementsMatch(t, @@ -622,7 +622,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_JSON, }, } - err := cit3.parseIndexParams() + err := cit3.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -644,7 +644,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_VarChar, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) }) @@ -661,7 +661,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_VarChar, }, } - err := cit.parseIndexParams() + 
err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) sortKeyValuePairs(cit.newIndexParams) assert.Equal(t, cit.newIndexParams, []*commonpb.KeyValuePair{ @@ -687,7 +687,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_Int64, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) }) @@ -704,7 +704,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_Int64, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) sortKeyValuePairs(cit.newIndexParams) assert.Equal(t, cit.newIndexParams, []*commonpb.KeyValuePair{ @@ -731,7 +731,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_VarChar, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) }) @@ -754,7 +754,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_Int64, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) }) @@ -776,7 +776,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_Int64, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -812,7 +812,7 @@ func Test_parseIndexParams(t *testing.T) { ElementType: schemapb.DataType_Int64, }, } - err := cit3.parseIndexParams() + err := cit3.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -859,7 +859,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_VarChar, }, } - err := cit4.parseIndexParams() + err := cit4.parseIndexParams(context.TODO()) assert.Error(t, err) cit5 := &createIndexTask{ @@ -904,7 +904,7 @@ func Test_parseIndexParams(t *testing.T) { DataType: schemapb.DataType_Int64, }, } - err = cit5.parseIndexParams() + err = cit5.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -932,7 +932,7 @@ func Test_parseIndexParams(t *testing.T) { }, } - err = cit.parseIndexParams() + err = cit.parseIndexParams(context.TODO()) assert.NoError(t, err) sortKeyValuePairs(cit.newIndexParams) assert.Equal(t, cit.newIndexParams, []*commonpb.KeyValuePair{ @@ -961,7 +961,7 @@ func Test_parseIndexParams(t *testing.T) { }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) sortKeyValuePairs(cit.newIndexParams) assert.Equal(t, cit.newIndexParams, []*commonpb.KeyValuePair{ @@ -990,7 +990,7 @@ func Test_parseIndexParams(t *testing.T) { }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.NoError(t, err) sortKeyValuePairs(cit.newIndexParams) assert.Equal(t, cit.newIndexParams, []*commonpb.KeyValuePair{ @@ -1019,7 +1019,7 @@ func Test_parseIndexParams(t *testing.T) { }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -1051,7 +1051,7 @@ func Test_parseIndexParams(t *testing.T) { }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -1087,7 +1087,7 @@ func Test_parseIndexParams(t *testing.T) { }, }, } - err := cit.parseIndexParams() + err := cit.parseIndexParams(context.TODO()) // Out of range in json: param 'M' (3000) should be in range [2, 2048] assert.Error(t, err) }) @@ -1142,7 +1142,7 @@ func Test_parseIndexParams_AutoIndex_WithType(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.True(t, 
task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1162,7 +1162,7 @@ func Test_parseIndexParams_AutoIndex_WithType(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.True(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1181,7 +1181,7 @@ func Test_parseIndexParams_AutoIndex_WithType(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.True(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1232,7 +1232,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { ExtraParams: make([]*commonpb.KeyValuePair, 0), }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.False(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1248,7 +1248,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { ExtraParams: make([]*commonpb.KeyValuePair, 0), }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.False(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1264,7 +1264,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { ExtraParams: make([]*commonpb.KeyValuePair, 0), }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.False(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1282,7 +1282,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.True(t, task.userAutoIndexMetricTypeSpecified) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ @@ -1301,7 +1301,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.NoError(t, err) assert.ElementsMatch(t, []*commonpb.KeyValuePair{ {Key: common.IndexTypeKey, Value: AutoIndexName}, @@ -1318,7 +1318,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -1332,7 +1332,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.Error(t, err) }) @@ -1347,7 +1347,7 @@ func Test_parseIndexParams_AutoIndex(t *testing.T) { }, }, } - err := task.parseIndexParams() + err := task.parseIndexParams(context.TODO()) assert.Error(t, err) }) } diff --git a/internal/proxy/task_insert.go b/internal/proxy/task_insert.go index 620b469d2fa96..2ce037d018079 100644 --- a/internal/proxy/task_insert.go +++ b/internal/proxy/task_insert.go @@ -114,20 +114,20 @@ func (it *insertTask) PreExecute(ctx context.Context) error { collectionName := it.insertMsg.CollectionName if err := validateCollectionName(collectionName); err != nil { - log.Warn("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err)) + log.Ctx(ctx).Warn("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err)) return err } maxInsertSize := Params.QuotaConfig.MaxInsertSize.GetAsInt() if maxInsertSize != -1 && it.insertMsg.Size() 
> maxInsertSize { - log.Warn("insert request size exceeds maxInsertSize", + log.Ctx(ctx).Warn("insert request size exceeds maxInsertSize", zap.Int("request size", it.insertMsg.Size()), zap.Int("maxInsertSize", maxInsertSize)) return merr.WrapErrAsInputError(merr.WrapErrParameterTooLarge("insert request size exceeds maxInsertSize")) } schema, err := globalMetaCache.GetCollectionSchema(ctx, it.insertMsg.GetDbName(), collectionName) if err != nil { - log.Warn("get collection schema from global meta cache failed", zap.String("collectionName", collectionName), zap.Error(err)) + log.Ctx(ctx).Warn("get collection schema from global meta cache failed", zap.String("collectionName", collectionName), zap.Error(err)) return merr.WrapErrAsInputErrorWhen(err, merr.ErrCollectionNotFound, merr.ErrDatabaseNotFound) } it.schema = schema.CollectionSchema diff --git a/internal/proxy/task_insert_streaming.go b/internal/proxy/task_insert_streaming.go index 0a99d9816844e..6116839b0e00f 100644 --- a/internal/proxy/task_insert_streaming.go +++ b/internal/proxy/task_insert_streaming.go @@ -133,7 +133,7 @@ func repackInsertDataWithPartitionKeyForStreamingService( channel2RowOffsets := assignChannelsByPK(result.IDs, channelNames, insertMsg) partitionNames, err := getDefaultPartitionsInPartitionKeyMode(ctx, insertMsg.GetDbName(), insertMsg.CollectionName) if err != nil { - log.Warn("get default partition names failed in partition key mode", + log.Ctx(ctx).Warn("get default partition names failed in partition key mode", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err)) return nil, err @@ -144,7 +144,7 @@ func repackInsertDataWithPartitionKeyForStreamingService( for _, partitionName := range partitionNames { partitionID, err := globalMetaCache.GetPartitionID(ctx, insertMsg.GetDbName(), insertMsg.CollectionName, partitionName) if err != nil { - log.Warn("get partition id failed", + log.Ctx(ctx).Warn("get partition id failed", zap.String("collectionName", insertMsg.CollectionName), zap.String("partitionName", partitionName), zap.Error(err)) @@ -155,7 +155,7 @@ func repackInsertDataWithPartitionKeyForStreamingService( hashValues, err := typeutil.HashKey2Partitions(partitionKeys, partitionNames) if err != nil { - log.Warn("has partition keys to partitions failed", + log.Ctx(ctx).Warn("has partition keys to partitions failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err)) return nil, err diff --git a/internal/proxy/task_policies.go b/internal/proxy/task_policies.go index f3a047bd38385..1c49a08ce621b 100644 --- a/internal/proxy/task_policies.go +++ b/internal/proxy/task_policies.go @@ -35,13 +35,13 @@ func RoundRobinPolicy( for _, target := range leaders { qn, err := mgr.GetClient(ctx, target) if err != nil { - log.Warn("query channel failed, node not available", zap.String("channel", channel), zap.Int64("nodeID", target.nodeID), zap.Error(err)) + log.Ctx(ctx).Warn("query channel failed, node not available", zap.String("channel", channel), zap.Int64("nodeID", target.nodeID), zap.Error(err)) combineErr = merr.Combine(combineErr, err) continue } err = query(ctx, target.nodeID, qn, channel) if err != nil { - log.Warn("query channel failed", zap.String("channel", channel), zap.Int64("nodeID", target.nodeID), zap.Error(err)) + log.Ctx(ctx).Warn("query channel failed", zap.String("channel", channel), zap.Int64("nodeID", target.nodeID), zap.Error(err)) combineErr = merr.Combine(combineErr, err) continue } diff --git a/internal/proxy/task_query.go b/internal/proxy/task_query.go index 
3bbdaffd9ae29..910227e796135 100644 --- a/internal/proxy/task_query.go +++ b/internal/proxy/task_query.go @@ -284,14 +284,14 @@ func (t *queryTask) CanSkipAllocTimestamp() bool { } else { collID, err := globalMetaCache.GetCollectionID(context.Background(), t.request.GetDbName(), t.request.GetCollectionName()) if err != nil { // err is not nil if collection not exists - log.Warn("query task get collectionID failed, can't skip alloc timestamp", + log.Ctx(t.ctx).Warn("query task get collectionID failed, can't skip alloc timestamp", zap.String("collectionName", t.request.GetCollectionName()), zap.Error(err)) return false } collectionInfo, err2 := globalMetaCache.GetCollectionInfo(context.Background(), t.request.GetDbName(), t.request.GetCollectionName(), collID) if err2 != nil { - log.Warn("query task get collection info failed, can't skip alloc timestamp", + log.Ctx(t.ctx).Warn("query task get collection info failed, can't skip alloc timestamp", zap.String("collectionName", t.request.GetCollectionName()), zap.Error(err)) return false } diff --git a/internal/proxy/task_scheduler.go b/internal/proxy/task_scheduler.go index 878363c94e161..da1a2ededbdba 100644 --- a/internal/proxy/task_scheduler.go +++ b/internal/proxy/task_scheduler.go @@ -127,7 +127,7 @@ func (queue *baseTaskQueue) AddActiveTask(t task) { tID := t.ID() _, ok := queue.activeTasks[tID] if ok { - log.Warn("Proxy task with tID already in active task list!", zap.Int64("ID", tID)) + log.Ctx(t.TraceCtx()).Warn("Proxy task with tID already in active task list!", zap.Int64("ID", tID)) } queue.activeTasks[tID] = t @@ -142,7 +142,7 @@ func (queue *baseTaskQueue) PopActiveTask(taskID UniqueID) task { return t } - log.Warn("Proxy task not in active task list! ts", zap.Int64("taskID", taskID)) + log.Ctx(context.TODO()).Warn("Proxy task not in active task list! 
ts", zap.Int64("taskID", taskID)) return t } @@ -250,7 +250,7 @@ func (queue *dmTaskQueue) Enqueue(t task) error { dmt := t.(dmlTask) err := dmt.setChannels() if err != nil { - log.Warn("setChannels failed when Enqueue", zap.Int64("taskID", t.ID()), zap.Error(err)) + log.Ctx(t.TraceCtx()).Warn("setChannels failed when Enqueue", zap.Int64("taskID", t.ID()), zap.Error(err)) return err } @@ -279,10 +279,10 @@ func (queue *dmTaskQueue) PopActiveTask(taskID UniqueID) task { defer queue.statsLock.Unlock() delete(queue.activeTasks, taskID) - log.Debug("Proxy dmTaskQueue popPChanStats", zap.Int64("taskID", t.ID())) + log.Ctx(t.TraceCtx()).Debug("Proxy dmTaskQueue popPChanStats", zap.Int64("taskID", t.ID())) queue.popPChanStats(t) } else { - log.Warn("Proxy task not in active task list!", zap.Int64("taskID", taskID)) + log.Ctx(context.TODO()).Warn("Proxy task not in active task list!", zap.Int64("taskID", taskID)) } return t } @@ -567,7 +567,7 @@ func (sched *taskScheduler) queryLoop() { return struct{}{}, nil }) } else { - log.Debug("query queue is empty ...") + log.Ctx(context.TODO()).Debug("query queue is empty ...") } } } diff --git a/internal/proxy/task_search.go b/internal/proxy/task_search.go index 3dc48cfe9503c..6995b07297f76 100644 --- a/internal/proxy/task_search.go +++ b/internal/proxy/task_search.go @@ -99,14 +99,14 @@ func (t *searchTask) CanSkipAllocTimestamp() bool { } else { collID, err := globalMetaCache.GetCollectionID(context.Background(), t.request.GetDbName(), t.request.GetCollectionName()) if err != nil { // err is not nil if collection not exists - log.Warn("search task get collectionID failed, can't skip alloc timestamp", + log.Ctx(t.ctx).Warn("search task get collectionID failed, can't skip alloc timestamp", zap.String("collectionName", t.request.GetCollectionName()), zap.Error(err)) return false } collectionInfo, err2 := globalMetaCache.GetCollectionInfo(context.Background(), t.request.GetDbName(), t.request.GetCollectionName(), collID) if err2 != nil { - log.Warn("search task get collection info failed, can't skip alloc timestamp", + log.Ctx(t.ctx).Warn("search task get collection info failed, can't skip alloc timestamp", zap.String("collectionName", t.request.GetCollectionName()), zap.Error(err)) return false } @@ -320,20 +320,20 @@ func setQueryInfoIfMvEnable(queryInfo *planpb.QueryInfo, t *searchTask, plan *pl if t.enableMaterializedView { partitionKeyFieldSchema, err := typeutil.GetPartitionKeyFieldSchema(t.schema.CollectionSchema) if err != nil { - log.Warn("failed to get partition key field schema", zap.Error(err)) + log.Ctx(t.ctx).Warn("failed to get partition key field schema", zap.Error(err)) return err } if typeutil.IsFieldDataTypeSupportMaterializedView(partitionKeyFieldSchema) { collInfo, colErr := globalMetaCache.GetCollectionInfo(t.ctx, t.request.GetDbName(), t.collectionName, t.CollectionID) if colErr != nil { - log.Warn("failed to get collection info", zap.Error(colErr)) + log.Ctx(t.ctx).Warn("failed to get collection info", zap.Error(colErr)) return err } if collInfo.partitionKeyIsolation { expr, err := exprutil.ParseExprFromPlan(plan) if err != nil { - log.Warn("failed to parse expr from plan during MV", zap.Error(err)) + log.Ctx(t.ctx).Warn("failed to parse expr from plan during MV", zap.Error(err)) return err } err = exprutil.ValidatePartitionKeyIsolation(expr) @@ -526,12 +526,12 @@ func (t *searchTask) tryGeneratePlan(params []*commonpb.KeyValuePair, dsl string searchInfo.planInfo.QueryFieldId = annField.GetFieldID() plan, planErr := 
planparserv2.CreateSearchPlan(t.schema.schemaHelper, dsl, annsFieldName, searchInfo.planInfo, exprTemplateValues) if planErr != nil { - log.Warn("failed to create query plan", zap.Error(planErr), + log.Ctx(t.ctx).Warn("failed to create query plan", zap.Error(planErr), zap.String("dsl", dsl), // may be very large if large term passed. zap.String("anns field", annsFieldName), zap.Any("query info", searchInfo.planInfo)) return nil, nil, 0, false, merr.WrapErrParameterInvalidMsg("failed to create query plan: %v", planErr) } - log.Debug("create query plan", + log.Ctx(t.ctx).Debug("create query plan", zap.String("dsl", t.request.Dsl), // may be very large if large term passed. zap.String("anns field", annsFieldName), zap.Any("query info", searchInfo.planInfo)) return plan, searchInfo.planInfo, searchInfo.offset, searchInfo.isIterator, nil @@ -540,13 +540,13 @@ func (t *searchTask) tryGeneratePlan(params []*commonpb.KeyValuePair, dsl string func (t *searchTask) tryParsePartitionIDsFromPlan(plan *planpb.PlanNode) ([]int64, error) { expr, err := exprutil.ParseExprFromPlan(plan) if err != nil { - log.Warn("failed to parse expr", zap.Error(err)) + log.Ctx(t.ctx).Warn("failed to parse expr", zap.Error(err)) return nil, err } partitionKeys := exprutil.ParseKeys(expr, exprutil.PartitionKey) hashedPartitionNames, err := assignPartitionKeys(t.ctx, t.request.GetDbName(), t.collectionName, partitionKeys) if err != nil { - log.Warn("failed to assign partition keys", zap.Error(err)) + log.Ctx(t.ctx).Warn("failed to assign partition keys", zap.Error(err)) return nil, err } @@ -554,7 +554,7 @@ func (t *searchTask) tryParsePartitionIDsFromPlan(plan *planpb.PlanNode) ([]int6 // translate partition name to partition ids. Use regex-pattern to match partition name. PartitionIDs, err2 := getPartitionIDs(t.ctx, t.request.GetDbName(), t.collectionName, hashedPartitionNames) if err2 != nil { - log.Warn("failed to get partition ids", zap.Error(err2)) + log.Ctx(t.ctx).Warn("failed to get partition ids", zap.Error(err2)) return nil, err2 } return PartitionIDs, nil @@ -597,6 +597,7 @@ func (t *searchTask) reduceResults(ctx context.Context, toReduceResults []*inter ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "reduceResults") defer sp.End() + log := log.Ctx(ctx) // Decode all search results validSearchResults, err := decodeSearchResults(ctx, toReduceResults) if err != nil { @@ -985,7 +986,7 @@ func checkSearchResultData(data *schemapb.SearchResultData, nq int64, topk int64 return nil } -func selectHighestScoreIndex(subSearchResultData []*schemapb.SearchResultData, subSearchNqOffset [][]int64, cursors []int64, qi int64) (int, int64) { +func selectHighestScoreIndex(ctx context.Context, subSearchResultData []*schemapb.SearchResultData, subSearchNqOffset [][]int64, cursors []int64, qi int64) (int, int64) { var ( subSearchIdx = -1 resultDataIdx int64 = -1 @@ -1007,7 +1008,7 @@ func selectHighestScoreIndex(subSearchResultData []*schemapb.SearchResultData, s if subSearchIdx == -1 { // A bad case happens where Knowhere returns distance/score == +/-maxFloat32 // by mistake. 
- log.Error("a bad score is returned, something is wrong here!", zap.Float32("score", sScore)) + log.Ctx(ctx).Error("a bad score is returned, something is wrong here!", zap.Float32("score", sScore)) } else if typeutil.ComparePK( typeutil.GetPK(subSearchResultData[i].GetIds(), sIdx), typeutil.GetPK(subSearchResultData[subSearchIdx].GetIds(), resultDataIdx)) { diff --git a/internal/proxy/task_search_test.go b/internal/proxy/task_search_test.go index bab8ee02bd436..a0c5042376054 100644 --- a/internal/proxy/task_search_test.go +++ b/internal/proxy/task_search_test.go @@ -1369,7 +1369,7 @@ func TestTaskSearch_selectHighestScoreIndex(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { for nqNum := int64(0); nqNum < test.args.nq; nqNum++ { - idx, dataIdx := selectHighestScoreIndex(test.args.subSearchResultData, test.args.subSearchNqOffset, test.args.cursors, nqNum) + idx, dataIdx := selectHighestScoreIndex(context.TODO(), test.args.subSearchResultData, test.args.subSearchNqOffset, test.args.cursors, nqNum) assert.Equal(t, test.expectedIdx[nqNum], idx) assert.Equal(t, test.expectedDataIdx[nqNum], int(dataIdx)) } @@ -1493,7 +1493,7 @@ func TestTaskSearch_selectHighestScoreIndex(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { for nqNum := int64(0); nqNum < test.args.nq; nqNum++ { - idx, dataIdx := selectHighestScoreIndex(test.args.subSearchResultData, test.args.subSearchNqOffset, test.args.cursors, nqNum) + idx, dataIdx := selectHighestScoreIndex(context.TODO(), test.args.subSearchResultData, test.args.subSearchNqOffset, test.args.cursors, nqNum) assert.Equal(t, test.expectedIdx[nqNum], idx) assert.Equal(t, test.expectedDataIdx[nqNum], int(dataIdx)) } diff --git a/internal/proxy/task_statistic.go b/internal/proxy/task_statistic.go index 09e43b64df506..a0b456253b4ab 100644 --- a/internal/proxy/task_statistic.go +++ b/internal/proxy/task_statistic.go @@ -178,7 +178,7 @@ func (g *getStatisticsTask) Execute(ctx context.Context) error { if err != nil { return err } - log.Debug("get collection statistics from DataCoord execute done") + log.Ctx(ctx).Debug("get collection statistics from DataCoord execute done") } return nil } @@ -194,10 +194,10 @@ func (g *getStatisticsTask) PostExecute(ctx context.Context) error { toReduceResults := make([]*internalpb.GetStatisticsResponse, 0) select { case <-g.TraceCtx().Done(): - log.Debug("wait to finish timeout!") + log.Ctx(ctx).Debug("wait to finish timeout!") return nil default: - log.Debug("all get statistics are finished or canceled") + log.Ctx(ctx).Debug("all get statistics are finished or canceled") g.resultBuf.Range(func(res *internalpb.GetStatisticsResponse) bool { toReduceResults = append(toReduceResults, res) log.Debug("proxy receives one get statistic response", @@ -220,7 +220,7 @@ func (g *getStatisticsTask) PostExecute(ctx context.Context) error { Stats: result, } - log.Debug("get statistics post execute done", zap.Any("result", result)) + log.Ctx(ctx).Debug("get statistics post execute done", zap.Any("result", result)) return nil } @@ -283,7 +283,7 @@ func (g *getStatisticsTask) getStatisticsShard(ctx context.Context, nodeID int64 } result, err := qn.GetStatistics(ctx, req) if err != nil { - log.Warn("QueryNode statistic return error", + log.Ctx(ctx).Warn("QueryNode statistic return error", zap.Int64("nodeID", nodeID), zap.String("channel", channel), zap.Error(err)) @@ -291,14 +291,14 @@ func (g *getStatisticsTask) getStatisticsShard(ctx context.Context, nodeID int64 return 
err } if result.GetStatus().GetErrorCode() == commonpb.ErrorCode_NotShardLeader { - log.Warn("QueryNode is not shardLeader", + log.Ctx(ctx).Warn("QueryNode is not shardLeader", zap.Int64("nodeID", nodeID), zap.String("channel", channel)) globalMetaCache.DeprecateShardCache(g.request.GetDbName(), g.collectionName) return errInvalidShardLeaders } if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success { - log.Warn("QueryNode statistic result error", + log.Ctx(ctx).Warn("QueryNode statistic result error", zap.Int64("nodeID", nodeID), zap.String("reason", result.GetStatus().GetReason())) return errors.Wrapf(merr.Error(result.GetStatus()), "fail to get statistic on QueryNode ID=%d", nodeID) diff --git a/internal/proxy/task_test.go b/internal/proxy/task_test.go index 1f74590c16bcc..0f0c0b6827964 100644 --- a/internal/proxy/task_test.go +++ b/internal/proxy/task_test.go @@ -2356,7 +2356,7 @@ func Test_checkTrain(t *testing.T) { "nlist": "1024", common.MetricTypeKey: "L2", } - assert.NoError(t, checkTrain(f, m)) + assert.NoError(t, checkTrain(context.TODO(), f, m)) }) t.Run("scalar", func(t *testing.T) { @@ -2366,7 +2366,7 @@ func Test_checkTrain(t *testing.T) { m := map[string]string{ common.IndexTypeKey: "scalar", } - assert.Error(t, checkTrain(f, m)) + assert.Error(t, checkTrain(context.TODO(), f, m)) }) t.Run("dimension mismatch", func(t *testing.T) { @@ -2385,7 +2385,7 @@ func Test_checkTrain(t *testing.T) { common.MetricTypeKey: "L2", common.DimKey: "8", } - assert.Error(t, checkTrain(f, m)) + assert.Error(t, checkTrain(context.TODO(), f, m)) }) t.Run("nlist test", func(t *testing.T) { @@ -2402,7 +2402,7 @@ func Test_checkTrain(t *testing.T) { common.IndexTypeKey: "IVF_FLAT", common.MetricTypeKey: "L2", } - assert.NoError(t, checkTrain(f, m)) + assert.NoError(t, checkTrain(context.TODO(), f, m)) }) } diff --git a/internal/proxy/task_upsert.go b/internal/proxy/task_upsert.go index 3ca4853fa9f53..26c7b6df163ce 100644 --- a/internal/proxy/task_upsert.go +++ b/internal/proxy/task_upsert.go @@ -148,7 +148,7 @@ func (it *upsertTask) OnEnqueue() error { func (it *upsertTask) insertPreExecute(ctx context.Context) error { collectionName := it.upsertMsg.InsertMsg.CollectionName if err := validateCollectionName(collectionName); err != nil { - log.Error("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err)) + log.Ctx(ctx).Error("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err)) return err } diff --git a/internal/proxy/util.go b/internal/proxy/util.go index 1468a283fbf7f..a986603205a8d 100644 --- a/internal/proxy/util.go +++ b/internal/proxy/util.go @@ -1183,7 +1183,7 @@ func passwordVerify(ctx context.Context, username, rawPwd string, globalMetaCach // meanwhile, generating Sha256Password depends on raw password and encrypted password will not cache. 
credInfo, err := globalMetaCache.GetCredentialInfo(ctx, username) if err != nil { - log.Error("found no credential", zap.String("username", username), zap.Error(err)) + log.Ctx(ctx).Error("found no credential", zap.String("username", username), zap.Error(err)) return false } @@ -1195,13 +1195,13 @@ func passwordVerify(ctx context.Context, username, rawPwd string, globalMetaCach // miss cache, verify against encrypted password from etcd if err := bcrypt.CompareHashAndPassword([]byte(credInfo.EncryptedPassword), []byte(rawPwd)); err != nil { - log.Error("Verify password failed", zap.Error(err)) + log.Ctx(ctx).Error("Verify password failed", zap.Error(err)) return false } // update cache after miss cache credInfo.Sha256Password = sha256Pwd - log.Debug("get credential miss cache, update cache with", zap.Any("credential", credInfo)) + log.Ctx(ctx).Debug("get credential miss cache, update cache with", zap.Any("credential", credInfo)) globalMetaCache.UpdateCredential(credInfo) return true } @@ -1613,7 +1613,7 @@ func getCollectionProgress( CollectionIDs: []int64{collectionID}, }) if err != nil { - log.Warn("fail to show collections", + log.Ctx(ctx).Warn("fail to show collections", zap.Int64("collectionID", collectionID), zap.Error(err), ) @@ -1622,7 +1622,7 @@ func getCollectionProgress( err = merr.Error(resp.GetStatus()) if err != nil { - log.Warn("fail to show collections", + log.Ctx(ctx).Warn("fail to show collections", zap.Int64("collectionID", collectionID), zap.Error(err)) return @@ -1667,7 +1667,7 @@ func getPartitionProgress( PartitionIDs: partitionIDs, }) if err != nil { - log.Warn("fail to show partitions", zap.Int64("collection_id", collectionID), + log.Ctx(ctx).Warn("fail to show partitions", zap.Int64("collection_id", collectionID), zap.String("collection_name", collectionName), zap.Strings("partition_names", partitionNames), zap.Error(err)) @@ -1677,7 +1677,7 @@ func getPartitionProgress( err = merr.Error(resp.GetStatus()) if err != nil { err = merr.Error(resp.GetStatus()) - log.Warn("fail to show partitions", + log.Ctx(ctx).Warn("fail to show partitions", zap.String("collectionName", collectionName), zap.Strings("partitionNames", partitionNames), zap.Error(err)) @@ -1822,7 +1822,7 @@ func checkDynamicFieldData(schema *schemapb.CollectionSchema, insertMsg *msgstre func SendReplicateMessagePack(ctx context.Context, replicateMsgStream msgstream.MsgStream, request interface{ GetBase() *commonpb.MsgBase }) { if replicateMsgStream == nil || request == nil { - log.Warn("replicate msg stream or request is nil", zap.Any("request", request)) + log.Ctx(ctx).Warn("replicate msg stream or request is nil", zap.Any("request", request)) return } msgBase := request.GetBase() diff --git a/internal/querycoordv2/handlers.go b/internal/querycoordv2/handlers.go index d770edc1e6fef..a3f12f281381b 100644 --- a/internal/querycoordv2/handlers.go +++ b/internal/querycoordv2/handlers.go @@ -127,7 +127,7 @@ func (s *Server) balanceSegments(ctx context.Context, actions = append(actions, releaseAction) } - t, err := task.NewSegmentTask(s.ctx, + t, err := task.NewSegmentTask(ctx, Params.QueryCoordCfg.SegmentTaskTimeout.GetAsDuration(time.Millisecond), utils.ManualBalance, collectionID, @@ -206,7 +206,7 @@ func (s *Server) balanceChannels(ctx context.Context, releaseAction := task.NewChannelAction(plan.From, task.ActionTypeReduce, plan.Channel.GetChannelName()) actions = append(actions, releaseAction) } - t, err := task.NewChannelTask(s.ctx, + t, err := task.NewChannelTask(ctx, 
Params.QueryCoordCfg.ChannelTaskTimeout.GetAsDuration(time.Millisecond), utils.ManualBalance, collectionID, diff --git a/internal/querycoordv2/ops_services.go b/internal/querycoordv2/ops_services.go index 9051f50bd93e3..d55f734d85e16 100644 --- a/internal/querycoordv2/ops_services.go +++ b/internal/querycoordv2/ops_services.go @@ -257,7 +257,7 @@ func (s *Server) TransferSegment(ctx context.Context, req *querypb.TransferSegme // check whether srcNode is healthy srcNode := req.GetSourceNodeID() - if err := s.isStoppingNode(srcNode); err != nil { + if err := s.isStoppingNode(ctx, srcNode); err != nil { err := merr.WrapErrNodeNotAvailable(srcNode, "the source node is invalid") return merr.Status(err), nil } @@ -270,7 +270,7 @@ func (s *Server) TransferSegment(ctx context.Context, req *querypb.TransferSegme dstNodeSet.Insert(replica.GetRWNodes()...) } else { // check whether dstNode is healthy - if err := s.isStoppingNode(req.GetTargetNodeID()); err != nil { + if err := s.isStoppingNode(ctx, req.GetTargetNodeID()); err != nil { err := merr.WrapErrNodeNotAvailable(srcNode, "the target node is invalid") return merr.Status(err), nil } @@ -329,7 +329,7 @@ func (s *Server) TransferChannel(ctx context.Context, req *querypb.TransferChann // check whether srcNode is healthy srcNode := req.GetSourceNodeID() - if err := s.isStoppingNode(srcNode); err != nil { + if err := s.isStoppingNode(ctx, srcNode); err != nil { err := merr.WrapErrNodeNotAvailable(srcNode, "the source node is invalid") return merr.Status(err), nil } @@ -342,7 +342,7 @@ func (s *Server) TransferChannel(ctx context.Context, req *querypb.TransferChann dstNodeSet.Insert(replica.GetRWNodes()...) } else { // check whether dstNode is healthy - if err := s.isStoppingNode(req.GetTargetNodeID()); err != nil { + if err := s.isStoppingNode(ctx, req.GetTargetNodeID()); err != nil { err := merr.WrapErrNodeNotAvailable(srcNode, "the target node is invalid") return merr.Status(err), nil } diff --git a/internal/querycoordv2/services.go b/internal/querycoordv2/services.go index d764dcaddba01..ff94adc961e4f 100644 --- a/internal/querycoordv2/services.go +++ b/internal/querycoordv2/services.go @@ -56,8 +56,9 @@ var ( ) func (s *Server) ShowCollections(ctx context.Context, req *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error) { - log.Ctx(ctx).Info("show collections request received", zap.Int64s("collections", req.GetCollectionIDs())) + log := log.Ctx(ctx).With(zap.Int64s("collections", req.GetCollectionIDs())) + log.Info("show collections request received") if err := merr.CheckHealthy(s.State()); err != nil { msg := "failed to show collections" log.Warn(msg, zap.Error(err)) @@ -687,15 +688,15 @@ func (s *Server) refreshCollection(ctx context.Context, collectionID int64) erro // } // } -func (s *Server) isStoppingNode(nodeID int64) error { +func (s *Server) isStoppingNode(ctx context.Context, nodeID int64) error { isStopping, err := s.nodeMgr.IsStoppingNode(nodeID) if err != nil { - log.Warn("fail to check whether the node is stopping", zap.Int64("node_id", nodeID), zap.Error(err)) + log.Ctx(ctx).Warn("fail to check whether the node is stopping", zap.Int64("node_id", nodeID), zap.Error(err)) return err } if isStopping { msg := fmt.Sprintf("failed to balance due to the source/destination node[%d] is stopping", nodeID) - log.Warn(msg) + log.Ctx(ctx).Warn(msg) return errors.New(msg) } return nil @@ -738,7 +739,7 @@ func (s *Server) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques log.Warn(msg) return merr.Status(err), 
nil } - if err := s.isStoppingNode(srcNode); err != nil { + if err := s.isStoppingNode(ctx, srcNode); err != nil { return merr.Status(errors.Wrap(err, fmt.Sprintf("can't balance, because the source node[%d] is invalid", srcNode))), nil } @@ -760,7 +761,7 @@ func (s *Server) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques // check whether dstNode is healthy for dstNode := range dstNodeSet { - if err := s.isStoppingNode(dstNode); err != nil { + if err := s.isStoppingNode(ctx, dstNode); err != nil { return merr.Status(errors.Wrap(err, fmt.Sprintf("can't balance, because the destination node[%d] is invalid", dstNode))), nil } @@ -924,7 +925,7 @@ func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthReque } if err := utils.CheckCollectionsQueryable(ctx, s.meta, s.targetMgr, s.dist, s.nodeMgr); err != nil { - log.Warn("some collection is not queryable during health check", zap.Error(err)) + log.Ctx(ctx).Warn("some collection is not queryable during health check", zap.Error(err)) } return componentutil.CheckHealthRespWithErr(nil), nil diff --git a/internal/querycoordv2/task/scheduler.go b/internal/querycoordv2/task/scheduler.go index cde0c3d43df34..d34a4eeaeb848 100644 --- a/internal/querycoordv2/task/scheduler.go +++ b/internal/querycoordv2/task/scheduler.go @@ -242,7 +242,7 @@ func (scheduler *taskScheduler) AddExecutor(nodeID int64) { scheduler.executors[nodeID] = executor executor.Start(scheduler.ctx) - log.Info("add executor for new QueryNode", zap.Int64("nodeID", nodeID)) + log.Ctx(scheduler.ctx).Info("add executor for new QueryNode", zap.Int64("nodeID", nodeID)) } func (scheduler *taskScheduler) RemoveExecutor(nodeID int64) { @@ -253,7 +253,7 @@ func (scheduler *taskScheduler) RemoveExecutor(nodeID int64) { if ok { executor.Stop() delete(scheduler.executors, nodeID) - log.Info("remove executor of offline QueryNode", zap.Int64("nodeID", nodeID)) + log.Ctx(scheduler.ctx).Info("remove executor of offline QueryNode", zap.Int64("nodeID", nodeID)) } } @@ -334,7 +334,7 @@ func (scheduler *taskScheduler) preAdd(task Task) error { index := NewReplicaSegmentIndex(task) if old, ok := scheduler.segmentTasks[index]; ok { if task.Priority() > old.Priority() { - log.Info("replace old task, the new one with higher priority", + log.Ctx(scheduler.ctx).Info("replace old task, the new one with higher priority", zap.Int64("oldID", old.ID()), zap.String("oldPriority", old.Priority().String()), zap.Int64("newID", task.ID()), @@ -367,7 +367,7 @@ func (scheduler *taskScheduler) preAdd(task Task) error { index := replicaChannelIndex{task.ReplicaID(), task.Channel()} if old, ok := scheduler.channelTasks[index]; ok { if task.Priority() > old.Priority() { - log.Info("replace old task, the new one with higher priority", + log.Ctx(scheduler.ctx).Info("replace old task, the new one with higher priority", zap.Int64("oldID", old.ID()), zap.String("oldPriority", old.Priority().String()), zap.Int64("newID", task.ID()), @@ -400,7 +400,7 @@ func (scheduler *taskScheduler) preAdd(task Task) error { index := NewReplicaLeaderIndex(task) if old, ok := scheduler.segmentTasks[index]; ok { if task.Priority() > old.Priority() { - log.Info("replace old task, the new one with higher priority", + log.Ctx(scheduler.ctx).Info("replace old task, the new one with higher priority", zap.Int64("oldID", old.ID()), zap.String("oldPriority", old.Priority().String()), zap.Int64("newID", task.ID()), @@ -428,7 +428,7 @@ func (scheduler *taskScheduler) tryPromoteAll() { if err != nil { task.Cancel(err) toRemove = 
append(toRemove, task) - log.Warn("failed to promote task", + log.Ctx(scheduler.ctx).Warn("failed to promote task", zap.Int64("taskID", task.ID()), zap.Error(err), ) @@ -447,14 +447,14 @@ func (scheduler *taskScheduler) tryPromoteAll() { } if len(toPromote) > 0 || len(toRemove) > 0 { - log.Debug("promoted tasks", + log.Ctx(scheduler.ctx).Debug("promoted tasks", zap.Int("promotedNum", len(toPromote)), zap.Int("toRemoveNum", len(toRemove))) } } func (scheduler *taskScheduler) promote(task Task) error { - log := log.With( + log := log.Ctx(scheduler.ctx).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -474,7 +474,7 @@ func (scheduler *taskScheduler) promote(task Task) error { func (scheduler *taskScheduler) Dispatch(node int64) { select { case <-scheduler.ctx.Done(): - log.Info("scheduler stopped") + log.Ctx(scheduler.ctx).Info("scheduler stopped") default: scheduler.rwmutex.Lock() @@ -629,7 +629,7 @@ func (scheduler *taskScheduler) GetTasksJSON() string { tasks := scheduler.taskStats.Values() ret, err := json.Marshal(tasks) if err != nil { - log.Warn("marshal tasks fail", zap.Error(err)) + log.Ctx(scheduler.ctx).Warn("marshal tasks fail", zap.Error(err)) return "" } return string(ret) @@ -644,7 +644,7 @@ func (scheduler *taskScheduler) schedule(node int64) { return } - log := log.With( + log := log.Ctx(scheduler.ctx).With( zap.Int64("nodeID", node), ) @@ -784,7 +784,7 @@ func (scheduler *taskScheduler) preProcess(task Task) bool { // process processes the given task, // return true if the task is started and succeeds to commit the current action func (scheduler *taskScheduler) process(task Task) bool { - log := log.With( + log := log.Ctx(scheduler.ctx).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -830,7 +830,7 @@ func (scheduler *taskScheduler) RemoveByNode(node int64) { } func (scheduler *taskScheduler) recordSegmentTaskError(task *SegmentTask) { - log.Warn("task scheduler recordSegmentTaskError", + log.Ctx(scheduler.ctx).Warn("task scheduler recordSegmentTaskError", zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -928,7 +928,7 @@ func (scheduler *taskScheduler) getTaskMetricsLabel(task Task) string { } func (scheduler *taskScheduler) checkStale(task Task) error { - log := log.With( + log := log.Ctx(task.Context()).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -970,7 +970,7 @@ func (scheduler *taskScheduler) checkStale(task Task) error { } func (scheduler *taskScheduler) checkSegmentTaskStale(task *SegmentTask) error { - log := log.With( + log := log.Ctx(task.Context()).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -1013,7 +1013,7 @@ func (scheduler *taskScheduler) checkSegmentTaskStale(task *SegmentTask) error { } func (scheduler *taskScheduler) checkChannelTaskStale(task *ChannelTask) error { - log := log.With( + log := log.Ctx(task.Context()).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), @@ -1041,7 +1041,7 @@ func (scheduler *taskScheduler) checkChannelTaskStale(task *ChannelTask) error { } func (scheduler *taskScheduler) checkLeaderTaskStale(task *LeaderTask) error { - log := log.With( + log := 
log.Ctx(task.Context()).With( zap.Int64("taskID", task.ID()), zap.Int64("collectionID", task.CollectionID()), zap.Int64("replicaID", task.ReplicaID()), diff --git a/internal/querynodev2/services.go b/internal/querynodev2/services.go index 95cae6c93c46f..ca9f953120ccf 100644 --- a/internal/querynodev2/services.go +++ b/internal/querynodev2/services.go @@ -70,7 +70,7 @@ func (node *QueryNode) GetComponentStates(ctx context.Context, req *milvuspb.Get if node.session != nil && node.session.Registered() { nodeID = node.GetNodeID() } - log.Debug("QueryNode current state", zap.Int64("NodeID", nodeID), zap.String("StateCode", code.String())) + log.Ctx(ctx).Debug("QueryNode current state", zap.Int64("NodeID", nodeID), zap.String("StateCode", code.String())) info := &milvuspb.ComponentInfo{ NodeID: nodeID, @@ -162,7 +162,7 @@ func (node *QueryNode) GetStatistics(ctx context.Context, req *querypb.GetStatis return ret, nil } -func (node *QueryNode) composeIndexMeta(indexInfos []*indexpb.IndexInfo, schema *schemapb.CollectionSchema) *segcorepb.CollectionIndexMeta { +func (node *QueryNode) composeIndexMeta(ctx context.Context, indexInfos []*indexpb.IndexInfo, schema *schemapb.CollectionSchema) *segcorepb.CollectionIndexMeta { fieldIndexMetas := make([]*segcorepb.FieldIndexMeta, 0) for _, info := range indexInfos { fieldIndexMetas = append(fieldIndexMetas, &segcorepb.FieldIndexMeta{ @@ -178,7 +178,7 @@ func (node *QueryNode) composeIndexMeta(indexInfos []*indexpb.IndexInfo, schema sizePerRecord, err := typeutil.EstimateSizePerRecord(schema) maxIndexRecordPerSegment := int64(0) if err != nil || sizePerRecord == 0 { - log.Warn("failed to transfer segment size to collection, because failed to estimate size per record", zap.Error(err)) + log.Ctx(ctx).Warn("failed to transfer segment size to collection, because failed to estimate size per record", zap.Error(err)) } else { threshold := paramtable.Get().DataCoordCfg.SegmentMaxSize.GetAsFloat() * 1024 * 1024 proportion := paramtable.Get().DataCoordCfg.SegmentSealProportion.GetAsFloat() @@ -239,7 +239,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm } node.manager.Collection.PutOrRef(req.GetCollectionID(), req.GetSchema(), - node.composeIndexMeta(req.GetIndexInfoList(), req.Schema), req.GetLoadMeta()) + node.composeIndexMeta(ctx, req.GetIndexInfoList(), req.Schema), req.GetLoadMeta()) defer func() { if !merr.Ok(status) { node.manager.Collection.Unref(req.GetCollectionID(), 1) @@ -476,7 +476,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, req *querypb.LoadSegmen } node.manager.Collection.PutOrRef(req.GetCollectionID(), req.GetSchema(), - node.composeIndexMeta(req.GetIndexInfoList(), req.GetSchema()), req.GetLoadMeta()) + node.composeIndexMeta(ctx, req.GetIndexInfoList(), req.GetSchema()), req.GetLoadMeta()) defer node.manager.Collection.Unref(req.GetCollectionID(), 1) if req.GetLoadScope() == querypb.LoadScope_Delta { @@ -1068,7 +1068,7 @@ func (node *QueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.Syn // ShowConfigurations returns the configurations of queryNode matching req.Pattern func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) { if err := node.lifetime.Add(merr.IsHealthy); err != nil { - log.Warn("QueryNode.ShowConfigurations failed", + log.Ctx(ctx).Warn("QueryNode.ShowConfigurations failed", zap.Int64("nodeId", node.GetNodeID()), zap.String("req", req.Pattern), zap.Error(err)) @@ -1098,7 
+1098,7 @@ func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.S // GetMetrics return system infos of the query node, such as total memory, memory usage, cpu usage ... func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) { if err := node.lifetime.Add(merr.IsHealthy); err != nil { - log.Warn("QueryNode.GetMetrics failed", + log.Ctx(ctx).Warn("QueryNode.GetMetrics failed", zap.Int64("nodeId", node.GetNodeID()), zap.String("req", req.Request), zap.Error(err)) diff --git a/internal/querynodev2/services_test.go b/internal/querynodev2/services_test.go index 3f6364a217c62..7ca5a58dc9902 100644 --- a/internal/querynodev2/services_test.go +++ b/internal/querynodev2/services_test.go @@ -1267,7 +1267,7 @@ func (suite *ServiceSuite) TestSearch_Failed() { CollectionID: suite.collectionID, PartitionIDs: suite.partitionIDs, } - indexMeta := suite.node.composeIndexMeta(segments.GenTestIndexInfoList(suite.collectionID, schema), schema) + indexMeta := suite.node.composeIndexMeta(ctx, segments.GenTestIndexInfoList(suite.collectionID, schema), schema) suite.node.manager.Collection.PutOrRef(suite.collectionID, schema, indexMeta, LoadMeta) // Delegator not found diff --git a/internal/rootcoord/alter_collection_task.go b/internal/rootcoord/alter_collection_task.go index f0887f2cf82d5..7a08b00bbf6b2 100644 --- a/internal/rootcoord/alter_collection_task.go +++ b/internal/rootcoord/alter_collection_task.go @@ -54,13 +54,13 @@ func (a *alterCollectionTask) Execute(ctx context.Context) error { oldColl, err := a.core.meta.GetCollectionByName(ctx, a.Req.GetDbName(), a.Req.GetCollectionName(), a.ts) if err != nil { - log.Warn("get collection failed during changing collection state", + log.Ctx(ctx).Warn("get collection failed during changing collection state", zap.String("collectionName", a.Req.GetCollectionName()), zap.Uint64("ts", a.ts)) return err } if ContainsKeyPairArray(a.Req.GetProperties(), oldColl.Properties) { - log.Info("skip to alter collection due to no changes were detected in the properties", zap.Int64("collectionID", oldColl.CollectionID)) + log.Ctx(ctx).Info("skip to alter collection due to no changes were detected in the properties", zap.Int64("collectionID", oldColl.CollectionID)) return nil } @@ -115,7 +115,7 @@ func (a *alterCollectionTask) Execute(ctx context.Context) error { ResourceGroups: newResourceGroups, }) if err := merr.CheckRPCCall(resp, err); err != nil { - log.Warn("failed to trigger update load config for collection", zap.Int64("collectionID", newColl.CollectionID), zap.Error(err)) + log.Ctx(ctx).Warn("failed to trigger update load config for collection", zap.Int64("collectionID", newColl.CollectionID), zap.Error(err)) return nil, err } return nil, nil diff --git a/internal/rootcoord/alter_database_task.go b/internal/rootcoord/alter_database_task.go index f87ec79d177dc..d9ab2d9e0cf3c 100644 --- a/internal/rootcoord/alter_database_task.go +++ b/internal/rootcoord/alter_database_task.go @@ -95,7 +95,7 @@ func (a *alterDatabaseTask) Execute(ctx context.Context) error { redoTask.AddAsyncStep(NewSimpleStep("", func(ctx context.Context) ([]nestedStep, error) { colls, err := a.core.meta.ListCollections(ctx, oldDB.Name, a.ts, true) if err != nil { - log.Warn("failed to trigger update load config for database", zap.Int64("dbID", oldDB.ID), zap.Error(err)) + log.Ctx(ctx).Warn("failed to trigger update load config for database", zap.Int64("dbID", oldDB.ID), zap.Error(err)) return nil, err } if 
len(colls) == 0 { @@ -108,7 +108,7 @@ func (a *alterDatabaseTask) Execute(ctx context.Context) error { ResourceGroups: newResourceGroups, }) if err := merr.CheckRPCCall(resp, err); err != nil { - log.Warn("failed to trigger update load config for database", zap.Int64("dbID", oldDB.ID), zap.Error(err)) + log.Ctx(ctx).Warn("failed to trigger update load config for database", zap.Int64("dbID", oldDB.ID), zap.Error(err)) return nil, err } return nil, nil diff --git a/internal/rootcoord/broker.go b/internal/rootcoord/broker.go index c4811b3550a9a..563e41a78af66 100644 --- a/internal/rootcoord/broker.go +++ b/internal/rootcoord/broker.go @@ -225,7 +225,7 @@ func (b *ServerBroker) GetSegmentIndexState(ctx context.Context, collID UniqueID } func (b *ServerBroker) BroadcastAlteredCollection(ctx context.Context, req *milvuspb.AlterCollectionRequest) error { - log.Info("broadcasting request to alter collection", zap.String("collectionName", req.GetCollectionName()), zap.Int64("collectionID", req.GetCollectionID()), zap.Any("props", req.GetProperties())) + log.Ctx(ctx).Info("broadcasting request to alter collection", zap.String("collectionName", req.GetCollectionName()), zap.Int64("collectionID", req.GetCollectionID()), zap.Any("props", req.GetProperties())) colMeta, err := b.s.meta.GetCollectionByID(ctx, req.GetDbName(), req.GetCollectionID(), typeutil.MaxTimestamp, false) if err != nil { @@ -265,7 +265,7 @@ func (b *ServerBroker) BroadcastAlteredCollection(ctx context.Context, req *milv if resp.ErrorCode != commonpb.ErrorCode_Success { return errors.New(resp.Reason) } - log.Info("done to broadcast request to alter collection", zap.String("collectionName", req.GetCollectionName()), zap.Int64("collectionID", req.GetCollectionID()), zap.Any("props", req.GetProperties())) + log.Ctx(ctx).Info("done to broadcast request to alter collection", zap.String("collectionName", req.GetCollectionName()), zap.Int64("collectionID", req.GetCollectionID()), zap.Any("props", req.GetProperties())) return nil } diff --git a/internal/rootcoord/create_collection_task.go b/internal/rootcoord/create_collection_task.go index a8546b7f07845..441c1a9a5d825 100644 --- a/internal/rootcoord/create_collection_task.go +++ b/internal/rootcoord/create_collection_task.go @@ -63,7 +63,7 @@ type createCollectionTask struct { partitionNames []string } -func (t *createCollectionTask) validate() error { +func (t *createCollectionTask) validate(ctx context.Context) error { if t.Req == nil { return errors.New("empty requests") } @@ -86,7 +86,7 @@ func (t *createCollectionTask) validate() error { // 2. 
check db-collection capacity db2CollIDs := t.core.meta.ListAllAvailCollections(t.ctx) - if err := t.checkMaxCollectionsPerDB(db2CollIDs); err != nil { + if err := t.checkMaxCollectionsPerDB(ctx, db2CollIDs); err != nil { return err } @@ -98,7 +98,7 @@ func (t *createCollectionTask) validate() error { maxCollectionNum := Params.QuotaConfig.MaxCollectionNum.GetAsInt() if totalCollections >= maxCollectionNum { - log.Warn("unable to create collection because the number of collection has reached the limit", zap.Int("max_collection_num", maxCollectionNum)) + log.Ctx(ctx).Warn("unable to create collection because the number of collection has reached the limit", zap.Int("max_collection_num", maxCollectionNum)) return merr.WrapErrCollectionNumLimitExceeded(t.Req.GetDbName(), maxCollectionNum) } @@ -111,22 +111,22 @@ func (t *createCollectionTask) validate() error { } // checkMaxCollectionsPerDB DB properties take precedence over quota configurations for max collections. -func (t *createCollectionTask) checkMaxCollectionsPerDB(db2CollIDs map[int64][]int64) error { +func (t *createCollectionTask) checkMaxCollectionsPerDB(ctx context.Context, db2CollIDs map[int64][]int64) error { collIDs, ok := db2CollIDs[t.dbID] if !ok { - log.Warn("can not found DB ID", zap.String("collection", t.Req.GetCollectionName()), zap.String("dbName", t.Req.GetDbName())) + log.Ctx(ctx).Warn("can not found DB ID", zap.String("collection", t.Req.GetCollectionName()), zap.String("dbName", t.Req.GetDbName())) return merr.WrapErrDatabaseNotFound(t.Req.GetDbName(), "failed to create collection") } db, err := t.core.meta.GetDatabaseByName(t.ctx, t.Req.GetDbName(), typeutil.MaxTimestamp) if err != nil { - log.Warn("can not found DB ID", zap.String("collection", t.Req.GetCollectionName()), zap.String("dbName", t.Req.GetDbName())) + log.Ctx(ctx).Warn("can not found DB ID", zap.String("collection", t.Req.GetCollectionName()), zap.String("dbName", t.Req.GetDbName())) return merr.WrapErrDatabaseNotFound(t.Req.GetDbName(), "failed to create collection") } check := func(maxColNumPerDB int) error { if len(collIDs) >= maxColNumPerDB { - log.Warn("unable to create collection because the number of collection has reached the limit in DB", zap.Int("maxCollectionNumPerDB", maxColNumPerDB)) + log.Ctx(ctx).Warn("unable to create collection because the number of collection has reached the limit in DB", zap.Int("maxCollectionNumPerDB", maxColNumPerDB)) return merr.WrapErrCollectionNumLimitExceeded(t.Req.GetDbName(), maxColNumPerDB) } return nil @@ -136,7 +136,7 @@ func (t *createCollectionTask) checkMaxCollectionsPerDB(db2CollIDs map[int64][]i if maxColNumPerDBStr != "" { maxColNumPerDB, err := strconv.Atoi(maxColNumPerDBStr) if err != nil { - log.Warn("parse value of property fail", zap.String("key", common.DatabaseMaxCollectionsKey), + log.Ctx(ctx).Warn("parse value of property fail", zap.String("key", common.DatabaseMaxCollectionsKey), zap.String("value", maxColNumPerDBStr), zap.Error(err)) return fmt.Errorf(fmt.Sprintf("parse value of property fail, key:%s, value:%s", common.DatabaseMaxCollectionsKey, maxColNumPerDBStr)) } @@ -243,10 +243,10 @@ func validateFieldDataType(schema *schemapb.CollectionSchema) error { return nil } -func (t *createCollectionTask) validateSchema(schema *schemapb.CollectionSchema) error { - log.With(zap.String("CollectionName", t.Req.CollectionName)) +func (t *createCollectionTask) validateSchema(ctx context.Context, schema *schemapb.CollectionSchema) error { + log.Ctx(ctx).With(zap.String("CollectionName", 
t.Req.CollectionName)) if t.Req.GetCollectionName() != schema.GetName() { - log.Error("collection name not matches schema name", zap.String("SchemaName", schema.Name)) + log.Ctx(ctx).Error("collection name not matches schema name", zap.String("SchemaName", schema.Name)) msg := fmt.Sprintf("collection name = %s, schema.Name=%s", t.Req.GetCollectionName(), schema.Name) return merr.WrapErrParameterInvalid("collection name matches schema name", "don't match", msg) } @@ -256,7 +256,7 @@ func (t *createCollectionTask) validateSchema(schema *schemapb.CollectionSchema) } if hasSystemFields(schema, []string{RowIDFieldName, TimeStampFieldName, MetaFieldName}) { - log.Error("schema contains system field", + log.Ctx(ctx).Error("schema contains system field", zap.String("RowIDFieldName", RowIDFieldName), zap.String("TimeStampFieldName", TimeStampFieldName), zap.String("MetaFieldName", MetaFieldName)) @@ -296,7 +296,7 @@ func (t *createCollectionTask) assignFieldAndFunctionID(schema *schemapb.Collect return nil } -func (t *createCollectionTask) appendDynamicField(schema *schemapb.CollectionSchema) { +func (t *createCollectionTask) appendDynamicField(ctx context.Context, schema *schemapb.CollectionSchema) { if schema.EnableDynamicField { schema.Fields = append(schema.Fields, &schemapb.FieldSchema{ Name: MetaFieldName, @@ -304,7 +304,7 @@ func (t *createCollectionTask) appendDynamicField(schema *schemapb.CollectionSch DataType: schemapb.DataType_JSON, IsDynamic: true, }) - log.Info("append dynamic field", zap.String("collection", schema.Name)) + log.Ctx(ctx).Info("append dynamic field", zap.String("collection", schema.Name)) } } @@ -325,15 +325,15 @@ func (t *createCollectionTask) appendSysFields(schema *schemapb.CollectionSchema }) } -func (t *createCollectionTask) prepareSchema() error { +func (t *createCollectionTask) prepareSchema(ctx context.Context) error { var schema schemapb.CollectionSchema if err := proto.Unmarshal(t.Req.GetSchema(), &schema); err != nil { return err } - if err := t.validateSchema(&schema); err != nil { + if err := t.validateSchema(ctx, &schema); err != nil { return err } - t.appendDynamicField(&schema) + t.appendDynamicField(ctx, &schema) if err := t.assignFieldAndFunctionID(&schema); err != nil { return err @@ -356,7 +356,7 @@ func (t *createCollectionTask) assignCollectionID() error { return err } -func (t *createCollectionTask) assignPartitionIDs() error { +func (t *createCollectionTask) assignPartitionIDs(ctx context.Context) error { t.partitionNames = make([]string, 0) defaultPartitionName := Params.CommonCfg.DefaultPartitionName.GetValue() @@ -391,7 +391,7 @@ func (t *createCollectionTask) assignPartitionIDs() error { for i := start; i < end; i++ { t.partIDs[i-start] = i } - log.Info("assign partitions when create collection", + log.Ctx(ctx).Info("assign partitions when create collection", zap.String("collectionName", t.Req.GetCollectionName()), zap.Strings("partitionNames", t.partitionNames)) @@ -425,11 +425,11 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error { } t.dbID = db.ID - if err := t.validate(); err != nil { + if err := t.validate(ctx); err != nil { return err } - if err := t.prepareSchema(); err != nil { + if err := t.prepareSchema(ctx); err != nil { return err } @@ -439,7 +439,7 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error { return err } - if err := t.assignPartitionIDs(); err != nil { + if err := t.assignPartitionIDs(ctx); err != nil { return err } @@ -527,13 +527,13 @@ func (t *createCollectionTask) 
broadcastCreateCollectionMsgIntoStreamingService( return startPositions, nil } -func (t *createCollectionTask) getCreateTs() (uint64, error) { +func (t *createCollectionTask) getCreateTs(ctx context.Context) (uint64, error) { replicateInfo := t.Req.GetBase().GetReplicateInfo() if !replicateInfo.GetIsReplicate() { return t.GetTs(), nil } if replicateInfo.GetMsgTimestamp() == 0 { - log.Warn("the cdc timestamp is not set in the request for the backup instance") + log.Ctx(ctx).Warn("the cdc timestamp is not set in the request for the backup instance") return 0, merr.WrapErrParameterInvalidMsg("the cdc timestamp is not set in the request for the backup instance") } return replicateInfo.GetMsgTimestamp(), nil @@ -542,7 +542,7 @@ func (t *createCollectionTask) getCreateTs() (uint64, error) { func (t *createCollectionTask) Execute(ctx context.Context) error { collID := t.collID partIDs := t.partIDs - ts, err := t.getCreateTs() + ts, err := t.getCreateTs(ctx) if err != nil { return err } @@ -592,7 +592,7 @@ func (t *createCollectionTask) Execute(ctx context.Context) error { return fmt.Errorf("create duplicate collection with different parameters, collection: %s", t.Req.GetCollectionName()) } // make creating collection idempotent. - log.Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", ts)) + log.Ctx(ctx).Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", ts)) return nil } diff --git a/internal/rootcoord/create_collection_task_test.go b/internal/rootcoord/create_collection_task_test.go index 519ef67a61856..7980044793983 100644 --- a/internal/rootcoord/create_collection_task_test.go +++ b/internal/rootcoord/create_collection_task_test.go @@ -47,7 +47,7 @@ func Test_createCollectionTask_validate(t *testing.T) { task := createCollectionTask{ Req: nil, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) }) @@ -57,7 +57,7 @@ func Test_createCollectionTask_validate(t *testing.T) { } { task.SetTs(1000) - ts, err := task.getCreateTs() + ts, err := task.getCreateTs(context.TODO()) assert.NoError(t, err) assert.EqualValues(t, 1000, ts) } @@ -72,14 +72,14 @@ func Test_createCollectionTask_validate(t *testing.T) { } { task.SetTs(1000) - _, err := task.getCreateTs() + _, err := task.getCreateTs(context.TODO()) assert.Error(t, err) err = task.Execute(context.Background()) assert.Error(t, err) } { task.Req.Base.ReplicateInfo.MsgTimestamp = 2000 - ts, err := task.getCreateTs() + ts, err := task.getCreateTs(context.TODO()) assert.NoError(t, err) assert.EqualValues(t, 2000, ts) } @@ -91,7 +91,7 @@ func Test_createCollectionTask_validate(t *testing.T) { Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}, }, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) }) @@ -104,7 +104,7 @@ func Test_createCollectionTask_validate(t *testing.T) { ShardsNum: cfgMaxShardNum + 1, }, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) }) @@ -117,7 +117,7 @@ func Test_createCollectionTask_validate(t *testing.T) { ShardsNum: cfgShardLimit + 1, }, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) }) @@ -140,7 +140,7 @@ func Test_createCollectionTask_validate(t *testing.T) { Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, }, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) task = createCollectionTask{ @@ -150,7 +150,7 @@ 
func Test_createCollectionTask_validate(t *testing.T) { }, dbID: util.DefaultDBID, } - err = task.validate() + err = task.validate(context.TODO()) assert.Error(t, err) }) @@ -181,7 +181,7 @@ func Test_createCollectionTask_validate(t *testing.T) { }, dbID: util.DefaultDBID, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) // invalid properties @@ -204,7 +204,7 @@ func Test_createCollectionTask_validate(t *testing.T) { dbID: util.DefaultDBID, } - err = task.validate() + err = task.validate(context.TODO()) assert.Error(t, err) }) @@ -224,7 +224,7 @@ func Test_createCollectionTask_validate(t *testing.T) { Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection}, }, } - err := task.validate() + err := task.validate(context.TODO()) assert.Error(t, err) task = createCollectionTask{ @@ -234,7 +234,7 @@ func Test_createCollectionTask_validate(t *testing.T) { }, dbID: util.DefaultDBID, } - err = task.validate() + err = task.validate(context.TODO()) assert.Error(t, err) }) @@ -275,7 +275,7 @@ func Test_createCollectionTask_validate(t *testing.T) { }, dbID: util.DefaultDBID, } - err := task.validate() + err := task.validate(context.TODO()) assert.ErrorIs(t, err, merr.ErrGeneralCapacityExceeded) }) @@ -312,7 +312,7 @@ func Test_createCollectionTask_validate(t *testing.T) { paramtable.Get().Save(Params.QuotaConfig.MaxCollectionNum.Key, strconv.Itoa(math.MaxInt64)) defer paramtable.Get().Reset(Params.QuotaConfig.MaxCollectionNum.Key) - err := task.validate() + err := task.validate(context.TODO()) assert.NoError(t, err) }) } @@ -330,7 +330,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { schema := &schemapb.CollectionSchema{ Name: otherName, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.Error(t, err) }) @@ -352,7 +352,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.Error(t, err) }) @@ -378,7 +378,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.Error(t, err) }) @@ -396,7 +396,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { {Name: RowIDFieldName}, }, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.Error(t, err) }) @@ -421,7 +421,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.ErrorIs(t, err, merr.ErrParameterInvalid) schema1 := &schemapb.CollectionSchema{ @@ -437,7 +437,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err1 := task.validateSchema(schema1) + err1 := task.validateSchema(context.TODO(), schema1) assert.ErrorIs(t, err1, merr.ErrParameterInvalid) schema2 := &schemapb.CollectionSchema{ @@ -453,7 +453,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err2 := task.validateSchema(schema2) + err2 := task.validateSchema(context.TODO(), schema2) assert.ErrorIs(t, err2, merr.ErrParameterInvalid) schema3 := &schemapb.CollectionSchema{ @@ -469,7 +469,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err3 := task.validateSchema(schema3) + err3 := task.validateSchema(context.TODO(), schema3) assert.ErrorIs(t, err3, merr.ErrParameterInvalid) schema4 := 
&schemapb.CollectionSchema{ @@ -485,7 +485,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err4 := task.validateSchema(schema4) + err4 := task.validateSchema(context.TODO(), schema4) assert.ErrorIs(t, err4, merr.ErrParameterInvalid) schema5 := &schemapb.CollectionSchema{ @@ -501,7 +501,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err5 := task.validateSchema(schema5) + err5 := task.validateSchema(context.TODO(), schema5) assert.ErrorIs(t, err5, merr.ErrParameterInvalid) schema6 := &schemapb.CollectionSchema{ @@ -517,7 +517,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err6 := task.validateSchema(schema6) + err6 := task.validateSchema(context.TODO(), schema6) assert.ErrorIs(t, err6, merr.ErrParameterInvalid) schema7 := &schemapb.CollectionSchema{ @@ -533,7 +533,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err7 := task.validateSchema(schema7) + err7 := task.validateSchema(context.TODO(), schema7) assert.ErrorIs(t, err7, merr.ErrParameterInvalid) schema8 := &schemapb.CollectionSchema{ @@ -549,7 +549,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err8 := task.validateSchema(schema8) + err8 := task.validateSchema(context.TODO(), schema8) assert.ErrorIs(t, err8, merr.ErrParameterInvalid) }) @@ -580,7 +580,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { }, }, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.ErrorIs(t, err, merr.ErrParameterInvalid) }) @@ -596,7 +596,7 @@ func Test_createCollectionTask_validateSchema(t *testing.T) { Name: collectionName, Fields: []*schemapb.FieldSchema{}, } - err := task.validateSchema(schema) + err := task.validateSchema(context.TODO(), schema) assert.NoError(t, err) }) } @@ -611,7 +611,7 @@ func Test_createCollectionTask_prepareSchema(t *testing.T) { Schema: []byte("invalid schema"), }, } - err := task.prepareSchema() + err := task.prepareSchema(context.TODO()) assert.Error(t, err) }) @@ -634,7 +634,7 @@ func Test_createCollectionTask_prepareSchema(t *testing.T) { Schema: marshaledSchema, }, } - err = task.prepareSchema() + err = task.prepareSchema(context.TODO()) assert.Error(t, err) }) @@ -661,7 +661,7 @@ func Test_createCollectionTask_prepareSchema(t *testing.T) { Schema: marshaledSchema, }, } - err = task.prepareSchema() + err = task.prepareSchema(context.TODO()) assert.NoError(t, err) }) @@ -688,7 +688,7 @@ func Test_createCollectionTask_prepareSchema(t *testing.T) { Schema: marshaledSchema, }, } - err = task.prepareSchema() + err = task.prepareSchema(context.TODO()) assert.Error(t, err) }) @@ -716,7 +716,7 @@ func Test_createCollectionTask_prepareSchema(t *testing.T) { Schema: marshaledSchema, }, } - err = task.prepareSchema() + err = task.prepareSchema(context.TODO()) assert.Error(t, err) }) } diff --git a/internal/rootcoord/create_partition_task.go b/internal/rootcoord/create_partition_task.go index 568c912ce50a6..22eb31ec17d6e 100644 --- a/internal/rootcoord/create_partition_task.go +++ b/internal/rootcoord/create_partition_task.go @@ -52,7 +52,7 @@ func (t *createPartitionTask) Prepare(ctx context.Context) error { func (t *createPartitionTask) Execute(ctx context.Context) error { for _, partition := range t.collMeta.Partitions { if partition.PartitionName == t.Req.GetPartitionName() { - log.Warn("add duplicate partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()), 
zap.Uint64("ts", t.GetTs())) + log.Ctx(ctx).Warn("add duplicate partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()), zap.Uint64("ts", t.GetTs())) return nil } } diff --git a/internal/rootcoord/dml_channels.go b/internal/rootcoord/dml_channels.go index 8e9719614f4b3..a5e3bbb69e679 100644 --- a/internal/rootcoord/dml_channels.go +++ b/internal/rootcoord/dml_channels.go @@ -178,7 +178,7 @@ func newDmlChannels(ctx context.Context, factory msgstream.Factory, chanNamePref for i, name := range names { ms, err := factory.NewMsgStream(ctx) if err != nil { - log.Error("Failed to add msgstream", + log.Ctx(ctx).Error("Failed to add msgstream", zap.String("name", name), zap.Error(err)) panic("Failed to add msgstream") @@ -202,7 +202,7 @@ func newDmlChannels(ctx context.Context, factory msgstream.Factory, chanNamePref heap.Init(&d.channelsHeap) - log.Info("init dml channels", zap.String("prefix", chanNamePrefix), zap.Int64("num", chanNum)) + log.Ctx(ctx).Info("init dml channels", zap.String("prefix", chanNamePrefix), zap.Int64("num", chanNum)) metrics.RootCoordNumOfDMLChannel.Add(float64(chanNum)) metrics.RootCoordNumOfMsgStream.Add(float64(chanNum)) @@ -276,7 +276,7 @@ func (d *dmlChannels) getChannelNum() int { func (d *dmlChannels) getMsgStreamByName(chanName string) (*dmlMsgStream, error) { dms, ok := d.pool.Get(chanName) if !ok { - log.Error("invalid channelName", zap.String("chanName", chanName)) + log.Ctx(d.ctx).Error("invalid channelName", zap.String("chanName", chanName)) return nil, errors.Newf("invalid channel name: %s", chanName) } return dms, nil @@ -292,7 +292,7 @@ func (d *dmlChannels) broadcast(chanNames []string, pack *msgstream.MsgPack) err dms.mutex.RLock() if dms.refcnt > 0 { if _, err := dms.ms.Broadcast(pack); err != nil { - log.Error("Broadcast failed", zap.Error(err), zap.String("chanName", chanName)) + log.Ctx(d.ctx).Error("Broadcast failed", zap.Error(err), zap.String("chanName", chanName)) dms.mutex.RUnlock() return err } @@ -314,7 +314,7 @@ func (d *dmlChannels) broadcastMark(chanNames []string, pack *msgstream.MsgPack) if dms.refcnt > 0 { ids, err := dms.ms.Broadcast(pack) if err != nil { - log.Error("BroadcastMark failed", zap.Error(err), zap.String("chanName", chanName)) + log.Ctx(d.ctx).Error("BroadcastMark failed", zap.Error(err), zap.String("chanName", chanName)) dms.mutex.RUnlock() return result, err } @@ -378,12 +378,12 @@ func genChannelNames(prefix string, num int64) []string { func parseChannelNameIndex(channelName string) int { index := strings.LastIndex(channelName, "_") if index < 0 { - log.Error("invalid channelName", zap.String("chanName", channelName)) + log.Ctx(context.TODO()).Error("invalid channelName", zap.String("chanName", channelName)) panic("invalid channel name: " + channelName) } index, err := strconv.Atoi(channelName[index+1:]) if err != nil { - log.Error("invalid channelName", zap.String("chanName", channelName), zap.Error(err)) + log.Ctx(context.TODO()).Error("invalid channelName", zap.String("chanName", channelName), zap.Error(err)) panic("invalid channel name: " + channelName) } return index @@ -407,7 +407,7 @@ func getNeedChanNum(setNum int, chanMap map[typeutil.UniqueID][]string) int { panic("topic were empty") } if chanNameSet.Contain(topic) { - log.Error("duplicate topics are pre-created", zap.String("topic", topic)) + log.Ctx(context.TODO()).Error("duplicate topics are pre-created", zap.String("topic", topic)) panic("duplicate topic: " + topic) } chanNameSet.Insert(topic) @@ 
-416,7 +416,7 @@ func getNeedChanNum(setNum int, chanMap map[typeutil.UniqueID][]string) int {
 	for _, chanNames := range chanMap {
 		for _, chanName := range chanNames {
 			if !chanNameSet.Contain(chanName) {
-				log.Error("invalid channel that is not in the list when pre-created topic", zap.String("chanName", chanName))
+				log.Ctx(context.TODO()).Error("invalid channel that is not in the list when pre-created topic", zap.String("chanName", chanName))
 				panic("invalid chanName: " + chanName)
 			}
 		}
diff --git a/internal/rootcoord/drop_collection_task.go b/internal/rootcoord/drop_collection_task.go
index 57e2eea6e235b..c59c898452272 100644
--- a/internal/rootcoord/drop_collection_task.go
+++ b/internal/rootcoord/drop_collection_task.go
@@ -59,7 +59,7 @@ func (t *dropCollectionTask) Execute(ctx context.Context) error {
 	collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.GetCollectionName(), typeutil.MaxTimestamp)
 	if errors.Is(err, merr.ErrCollectionNotFound) {
 		// make dropping collection idempotent.
-		log.Warn("drop non-existent collection", zap.String("collection", t.Req.GetCollectionName()))
+		log.Ctx(ctx).Warn("drop non-existent collection", zap.String("collection", t.Req.GetCollectionName()))
 		return nil
 	}
 
diff --git a/internal/rootcoord/drop_partition_task.go b/internal/rootcoord/drop_partition_task.go
index ef8f107adb2e1..d25265f05da55 100644
--- a/internal/rootcoord/drop_partition_task.go
+++ b/internal/rootcoord/drop_partition_task.go
@@ -62,7 +62,7 @@ func (t *dropPartitionTask) Execute(ctx context.Context) error {
 		}
 	}
 	if partID == common.InvalidPartitionID {
-		log.Warn("drop an non-existent partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()))
+		log.Ctx(ctx).Warn("drop an non-existent partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()))
 		// make dropping partition idempotent.
 		return nil
 	}
diff --git a/internal/rootcoord/list_db_task.go b/internal/rootcoord/list_db_task.go
index dbc5fa061a2d5..a4e7891428f34 100644
--- a/internal/rootcoord/list_db_task.go
+++ b/internal/rootcoord/list_db_task.go
@@ -53,7 +53,7 @@ func (t *listDatabaseTask) Execute(ctx context.Context) error {
 		// it will fail if the inner node server use the list database API
 		if err != nil || curUser == util.UserRoot {
 			if err != nil {
-				log.Warn("get current user from context failed", zap.Error(err))
+				log.Ctx(ctx).Warn("get current user from context failed", zap.Error(err))
 			}
 			privilegeDBs.Insert(util.AnyWord)
 			return privilegeDBs, nil
diff --git a/internal/rootcoord/meta_table.go b/internal/rootcoord/meta_table.go
index 7574f729296c6..d1030606a3603 100644
--- a/internal/rootcoord/meta_table.go
+++ b/internal/rootcoord/meta_table.go
@@ -156,7 +156,7 @@ func (mt *MetaTable) reload() error {
 		return err
 	}
 
-	log.Info("recover databases", zap.Int("num of dbs", len(dbs)))
+	log.Ctx(mt.ctx).Info("recover databases", zap.Int("num of dbs", len(dbs)))
 	for _, db := range dbs {
 		mt.dbName2Meta[db.Name] = db
 	}
@@ -199,7 +199,7 @@ func (mt *MetaTable) reload() error {
 		metrics.RootCoordNumOfDatabases.Inc()
 		metrics.RootCoordNumOfCollections.WithLabelValues(dbName).Add(float64(collectionNum))
 		metrics.RootCoordNumOfPartitions.WithLabelValues().Add(float64(partitionNum))
-		log.Info("collections recovered from db", zap.String("db_name", dbName),
+		log.Ctx(mt.ctx).Info("collections recovered from db", zap.String("db_name", dbName),
 			zap.Int64("collection_num", collectionNum),
 			zap.Int64("partition_num", partitionNum))
 	}
@@ -215,7 +215,7 @@ func (mt *MetaTable) reload() error {
 			mt.aliases.insert(dbName, alias.Name, alias.CollectionID)
 		}
 	}
-	log.Info("RootCoord meta table reload done", zap.Duration("duration", record.ElapseSpan()))
+	log.Ctx(mt.ctx).Info("RootCoord meta table reload done", zap.Duration("duration", record.ElapseSpan()))
 	return nil
 }
 
@@ -238,7 +238,7 @@ func (mt *MetaTable) reloadWithNonDatabase() error {
 	}
 
 	if collectionNum > 0 {
-		log.Info("recover collections without db", zap.Int64("collection_num", collectionNum), zap.Int64("partition_num", partitionNum))
+		log.Ctx(mt.ctx).Info("recover collections without db", zap.Int64("collection_num", collectionNum), zap.Int64("partition_num", partitionNum))
 	}
 
 	aliases, err := mt.catalog.ListAliases(mt.ctx, util.NonDBID, typeutil.MaxTimestamp)
@@ -311,7 +311,7 @@ func (mt *MetaTable) AlterDatabase(ctx context.Context, oldDB *model.Database, n
 		return err
 	}
 	mt.dbName2Meta[oldDB.Name] = newDB
-	log.Info("alter database finished", zap.String("dbName", oldDB.Name), zap.Uint64("ts", ts))
+	log.Ctx(ctx).Info("alter database finished", zap.String("dbName", oldDB.Name), zap.Uint64("ts", ts))
 	return nil
 }
 
@@ -325,11 +325,11 @@ func (mt *MetaTable) DropDatabase(ctx context.Context, dbName string, ts typeuti
 
 	db, err := mt.getDatabaseByNameInternal(ctx, dbName, typeutil.MaxTimestamp)
 	if err != nil {
-		log.Warn("not found database", zap.String("db", dbName))
+		log.Ctx(ctx).Warn("not found database", zap.String("db", dbName))
 		return nil
 	}
 
-	colls, err := mt.listCollectionFromCache(dbName, true)
+	colls, err := mt.listCollectionFromCache(ctx, dbName, true)
 	if err != nil {
 		return err
 	}
@@ -378,10 +378,10 @@ func (mt *MetaTable) GetDatabaseByName(ctx context.Context, dbName string, ts Ti
 	return mt.getDatabaseByNameInternal(ctx, dbName, ts)
 }
 
-func (mt *MetaTable) getDatabaseByNameInternal(_ context.Context, dbName string, _ Timestamp) (*model.Database, error) {
+func (mt *MetaTable) getDatabaseByNameInternal(ctx context.Context, dbName string, _ Timestamp) (*model.Database, error) {
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty")
+		log.Ctx(ctx).Warn("db name is empty")
 		dbName = util.DefaultDBName
 	}
 
@@ -497,7 +497,7 @@ func (mt *MetaTable) RemoveCollection(ctx context.Context, collectionID UniqueID
 	// which is bigger than `ts1`. So we assume that ts should always be the latest.
 	coll, ok := mt.collID2Meta[collectionID]
 	if !ok {
-		log.Warn("not found collection, skip remove", zap.Int64("collectionID", collectionID))
+		log.Ctx(ctx).Warn("not found collection, skip remove", zap.Int64("collectionID", collectionID))
 		return nil
 	}
 
@@ -605,7 +605,7 @@ func (mt *MetaTable) GetCollectionByName(ctx context.Context, dbName string, col
 func (mt *MetaTable) getCollectionByNameInternal(ctx context.Context, dbName string, collectionName string, ts Timestamp) (*model.Collection, error) {
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("collectionName", collectionName), zap.Uint64("ts", ts))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("collectionName", collectionName), zap.Uint64("ts", ts))
 		dbName = util.DefaultDBName
 	}
 
@@ -682,7 +682,7 @@ func (mt *MetaTable) ListCollections(ctx context.Context, dbName string, ts Time
 	defer mt.ddLock.RUnlock()
 
 	if isMaxTs(ts) {
-		return mt.listCollectionFromCache(dbName, onlyAvail)
+		return mt.listCollectionFromCache(ctx, dbName, onlyAvail)
 	}
 
 	db, err := mt.getDatabaseByNameInternal(ctx, dbName, typeutil.MaxTimestamp)
@@ -706,10 +706,10 @@ func (mt *MetaTable) ListCollections(ctx context.Context, dbName string, ts Time
 	return onlineCollections, nil
 }
 
-func (mt *MetaTable) listCollectionFromCache(dbName string, onlyAvail bool) ([]*model.Collection, error) {
+func (mt *MetaTable) listCollectionFromCache(ctx context.Context, dbName string, onlyAvail bool) ([]*model.Collection, error) {
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty")
+		log.Ctx(ctx).Warn("db name is empty")
 		dbName = util.DefaultDBName
 	}
 
@@ -755,7 +755,7 @@ func (mt *MetaTable) AlterCollection(ctx context.Context, oldColl *model.Collect
 		return err
 	}
 	mt.collID2Meta[oldColl.CollectionID] = newColl
-	log.Info("alter collection finished", zap.Int64("collectionID", oldColl.CollectionID), zap.Uint64("ts", ts))
+	log.Ctx(ctx).Info("alter collection finished", zap.Int64("collectionID", oldColl.CollectionID), zap.Uint64("ts", ts))
 	return nil
 }
 
@@ -962,7 +962,7 @@ func (mt *MetaTable) RemovePartition(ctx context.Context, dbID int64, collection
 	if loc != -1 {
 		coll.Partitions = append(coll.Partitions[:loc], coll.Partitions[loc+1:]...)
 	}
-	log.Info("remove partition", zap.Int64("collection", collectionID), zap.Int64("partition", partitionID), zap.Uint64("ts", ts))
+	log.Ctx(ctx).Info("remove partition", zap.Int64("collection", collectionID), zap.Int64("partition", partitionID), zap.Uint64("ts", ts))
 	return nil
 }
 
@@ -971,7 +971,7 @@ func (mt *MetaTable) CreateAlias(ctx context.Context, dbName string, alias strin
 	defer mt.ddLock.Unlock()
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("alias", alias), zap.String("collection", collectionName))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("alias", alias), zap.String("collection", collectionName))
 		dbName = util.DefaultDBName
 	}
 
@@ -1002,7 +1002,7 @@ func (mt *MetaTable) CreateAlias(ctx context.Context, dbName string, alias strin
 	// check if alias exists.
 	aliasedCollectionID, ok := mt.aliases.get(dbName, alias)
 	if ok && aliasedCollectionID == collectionID {
-		log.Warn("add duplicate alias", zap.String("alias", alias), zap.String("collection", collectionName), zap.Uint64("ts", ts))
+		log.Ctx(ctx).Warn("add duplicate alias", zap.String("alias", alias), zap.String("collection", collectionName), zap.Uint64("ts", ts))
 		return nil
 	} else if ok {
 		// TODO: better to check if aliasedCollectionID exist or is available, though not very possible.
@@ -1047,7 +1047,7 @@ func (mt *MetaTable) DropAlias(ctx context.Context, dbName string, alias string,
 	defer mt.ddLock.Unlock()
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("alias", alias), zap.Uint64("ts", ts))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("alias", alias), zap.Uint64("ts", ts))
 		dbName = util.DefaultDBName
 	}
 
@@ -1076,7 +1076,7 @@ func (mt *MetaTable) AlterAlias(ctx context.Context, dbName string, alias string
 	defer mt.ddLock.Unlock()
 	// backward compatibility for rolling upgrade
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("alias", alias), zap.String("collection", collectionName))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("alias", alias), zap.String("collection", collectionName))
 		dbName = util.DefaultDBName
 	}
 
@@ -1143,7 +1143,7 @@ func (mt *MetaTable) DescribeAlias(ctx context.Context, dbName string, alias str
 	defer mt.ddLock.Unlock()
 
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("alias", alias))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("alias", alias))
 		dbName = util.DefaultDBName
 	}
 
@@ -1173,7 +1173,7 @@ func (mt *MetaTable) ListAliases(ctx context.Context, dbName string, collectionN
 	defer mt.ddLock.Unlock()
 
 	if dbName == "" {
-		log.Warn("db name is empty", zap.String("collection", collectionName))
+		log.Ctx(ctx).Warn("db name is empty", zap.String("collection", collectionName))
 		dbName = util.DefaultDBName
 	}
 
@@ -1243,7 +1243,7 @@ func (mt *MetaTable) AddCredential(ctx context.Context, credInfo *internalpb.Cre
 	}
 	if len(usernames) >= Params.ProxyCfg.MaxUserNum.GetAsInt() {
 		errMsg := "unable to add user because the number of users has reached the limit"
-		log.Error(errMsg, zap.Int("max_user_num", Params.ProxyCfg.MaxUserNum.GetAsInt()))
+		log.Ctx(ctx).Error(errMsg, zap.Int("max_user_num", Params.ProxyCfg.MaxUserNum.GetAsInt()))
 		return errors.New(errMsg)
 	}
 
@@ -1313,18 +1313,18 @@ func (mt *MetaTable) CreateRole(ctx context.Context, tenant string, entity *milv
 
 	results, err := mt.catalog.ListRole(ctx, tenant, nil, false)
 	if err != nil {
-		log.Warn("fail to list roles", zap.Error(err))
+		log.Ctx(ctx).Warn("fail to list roles", zap.Error(err))
 		return err
 	}
 	for _, result := range results {
 		if result.GetRole().GetName() == entity.Name {
-			log.Info("role already exists", zap.String("role", entity.Name))
+			log.Ctx(ctx).Info("role already exists", zap.String("role", entity.Name))
 			return common.NewIgnorableError(errors.Newf("role [%s] already exists", entity))
 		}
 	}
 	if len(results) >= Params.ProxyCfg.MaxRoleNum.GetAsInt() {
 		errMsg := "unable to create role because the number of roles has reached the limit"
-		log.Warn(errMsg, zap.Int("max_role_num", Params.ProxyCfg.MaxRoleNum.GetAsInt()))
+		log.Ctx(ctx).Warn(errMsg, zap.Int("max_role_num", Params.ProxyCfg.MaxRoleNum.GetAsInt()))
 		return errors.New(errMsg)
 	}
 
@@ -1587,7 +1587,7 @@ func (mt *MetaTable) OperatePrivilegeGroup(ctx context.Context, groupName string
 		// merge with current privileges
 		group, err := mt.catalog.GetPrivilegeGroup(ctx, groupName)
 		if err != nil {
-			log.Warn("fail to get privilege group", zap.String("privilege_group", groupName), zap.Error(err))
+			log.Ctx(ctx).Warn("fail to get privilege group", zap.String("privilege_group", groupName), zap.Error(err))
 			return err
 		}
 		privSet := lo.SliceToMap(group.Privileges, func(p *milvuspb.PrivilegeEntity) (string, struct{}) {
@@ -1603,7 +1603,7 @@ func (mt *MetaTable) OperatePrivilegeGroup(ctx context.Context, groupName string
 				delete(privSet, p.Name)
 			}
 		default:
-			log.Warn("unsupported operate type", zap.Any("operate_type", operateType))
+			log.Ctx(ctx).Warn("unsupported operate type", zap.Any("operate_type", operateType))
 			return fmt.Errorf("unsupported operate type: %v", operateType)
 		}
 
diff --git a/internal/rootcoord/meta_table_test.go b/internal/rootcoord/meta_table_test.go
index 9329eab95d7a2..39d2ae9ef6d82 100644
--- a/internal/rootcoord/meta_table_test.go
+++ b/internal/rootcoord/meta_table_test.go
@@ -1914,17 +1914,17 @@ func TestMetaTable_EmtpyDatabaseName(t *testing.T) {
 		},
 	}
 
-	ret, err := mt.listCollectionFromCache("none", false)
+	ret, err := mt.listCollectionFromCache(context.TODO(), "none", false)
 	assert.Error(t, err)
 	assert.Nil(t, ret)
 
-	ret, err = mt.listCollectionFromCache("", false)
+	ret, err = mt.listCollectionFromCache(context.TODO(), "", false)
 	assert.NoError(t, err)
 	assert.Equal(t, 2, len(ret))
 	assert.Contains(t, []int64{1, 2}, ret[0].CollectionID)
 	assert.Contains(t, []int64{1, 2}, ret[1].CollectionID)
 
-	ret, err = mt.listCollectionFromCache("db2", false)
+	ret, err = mt.listCollectionFromCache(context.TODO(), "db2", false)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(ret))
 	assert.Equal(t, int64(3), ret[0].CollectionID)
diff --git a/internal/rootcoord/root_coord.go b/internal/rootcoord/root_coord.go
index 0222d0caa52a6..3659e743165a8 100644
--- a/internal/rootcoord/root_coord.go
+++ b/internal/rootcoord/root_coord.go
@@ -864,7 +864,7 @@ func (c *Core) Stop() error {
 // GetComponentStates get states of components
 func (c *Core) GetComponentStates(ctx context.Context, req *milvuspb.GetComponentStatesRequest) (*milvuspb.ComponentStates, error) {
 	code := c.GetStateCode()
-	log.Debug("RootCoord current state", zap.String("StateCode", code.String()))
+	log.Ctx(ctx).Debug("RootCoord current state", zap.String("StateCode", code.String()))
 
 	nodeID := common.NotRegisteredID
 	if c.session != nil && c.session.Registered() {
@@ -2099,8 +2099,7 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden
 		ctxLog.Warn("CreateCredential add cache failed", zap.Error(err))
 		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
 	}
-	log.Debug("CreateCredential success", zap.String("role", typeutil.RootCoordRole),
-		zap.String("username", credInfo.Username))
+	ctxLog.Debug("CreateCredential success")
 
 	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
 	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
@@ -2162,7 +2161,7 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden
 		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
 		return merr.StatusWithErrorCode(err, commonpb.ErrorCode_UpdateCredentialFailure), nil
 	}
-	log.Debug("UpdateCredential success")
+	ctxLog.Debug("UpdateCredential success")
 
 	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
 	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
@@ -2402,7 +2401,7 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole
 	redoTask.AddSyncStep(NewSimpleStep("operate user role meta data", func(ctx context.Context) ([]nestedStep, error) {
 		err := c.meta.OperateUserRole(ctx, util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, &milvuspb.RoleEntity{Name: in.RoleName}, in.Type)
 		if err != nil && !common.IsIgnorableError(err) {
-			log.Warn("operate user role mata data failed", zap.Error(err))
+			ctxLog.Warn("operate user role mata data failed", zap.Error(err))
 			return nil, err
 		}
 		return nil, nil
@@ -2416,14 +2415,14 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole
 			opType = int32(typeutil.CacheRemoveUserFromRole)
 		default:
 			errMsg := "invalid operate type for the OperateUserRole api"
-			log.Warn(errMsg, zap.Any("in", in))
+			ctxLog.Warn(errMsg, zap.Any("in", in))
 			return nil, nil
 		}
 		if err := c.proxyClientManager.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{
 			OpType: opType,
 			OpKey:  funcutil.EncodeUserRoleCache(in.Username, in.RoleName),
 		}); err != nil {
-			log.Warn("fail to refresh policy info cache", zap.Any("in", in), zap.Error(err))
+			ctxLog.Warn("fail to refresh policy info cache", zap.Any("in", in), zap.Error(err))
 			return nil, err
 		}
 		return nil, nil
@@ -2431,7 +2430,7 @@ func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRole
 	err := redoTask.Execute(ctx)
 	if err != nil {
 		errMsg := "fail to execute task when operate the user and role"
-		log.Warn(errMsg, zap.Error(err))
+		ctxLog.Warn(errMsg, zap.Error(err))
 		return merr.StatusWithErrorCode(errors.New(errMsg), commonpb.ErrorCode_OperateUserRoleFailure), nil
 	}
 
@@ -2679,7 +2678,7 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile
 
 		err := c.meta.OperatePrivilege(ctx, util.DefaultTenant, in.Entity, in.Type)
 		if err != nil && !common.IsIgnorableError(err) {
-			log.Warn("fail to operate the privilege", zap.Any("in", in), zap.Error(err))
+			ctxLog.Warn("fail to operate the privilege", zap.Any("in", in), zap.Error(err))
 			return nil, err
 		}
 		return nil, nil
@@ -2724,7 +2723,7 @@ func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivile
 	err := redoTask.Execute(ctx)
 	if err != nil {
 		errMsg := "fail to execute task when operating the privilege"
-		log.Warn(errMsg, zap.Error(err))
+		ctxLog.Warn(errMsg, zap.Error(err))
 		return merr.StatusWithErrorCode(err, commonpb.ErrorCode_OperatePrivilegeFailure), nil
 	}
 
@@ -2760,7 +2759,7 @@ func (c *Core) operatePrivilegeCommonCheck(ctx context.Context, in *milvuspb.Ope
 		return errors.New("the user entity in the grantor entity is nil or empty")
 	}
 	if _, err := c.meta.SelectUser(ctx, util.DefaultTenant, &milvuspb.UserEntity{Name: entity.User.Name}, false); err != nil {
-		log.Warn("fail to select the user", zap.String("username", entity.User.Name), zap.Error(err))
+		log.Ctx(ctx).Warn("fail to select the user", zap.String("username", entity.User.Name), zap.Error(err))
 		return errors.New("not found the user, maybe the user isn't existed or internal system error")
 	}
 	if entity.Privilege == nil {
@@ -2941,7 +2940,7 @@ func (c *Core) RestoreRBAC(ctx context.Context, in *milvuspb.RestoreRBACMetaRequ
 	redoTask := newBaseRedoTask(c.stepExecutor)
 	redoTask.AddSyncStep(NewSimpleStep("restore rbac meta data", func(ctx context.Context) ([]nestedStep, error) {
 		if err := c.meta.RestoreRBAC(ctx, util.DefaultTenant, in.RBACMeta); err != nil {
-			log.Warn("fail to restore rbac meta data", zap.Any("in", in), zap.Error(err))
+			ctxLog.Warn("fail to restore rbac meta data", zap.Any("in", in), zap.Error(err))
 			return nil, err
 		}
 		return nil, nil
@@ -2950,7 +2949,7 @@ func (c *Core) RestoreRBAC(ctx context.Context, in *milvuspb.RestoreRBACMetaRequ
 		if err := c.proxyClientManager.RefreshPolicyInfoCache(c.ctx, &proxypb.RefreshPolicyInfoCacheRequest{
 			OpType: int32(typeutil.CacheRefresh),
 		}); err != nil {
-			log.Warn("fail to refresh policy info cache", zap.Any("in", in), zap.Error(err))
+			ctxLog.Warn("fail to refresh policy info cache", zap.Any("in", in), zap.Error(err))
 			return nil, err
 		}
 		return nil, nil
@@ -2959,7 +2958,7 @@ func (c *Core) RestoreRBAC(ctx context.Context, in *milvuspb.RestoreRBACMetaRequ
 	err := redoTask.Execute(ctx)
 	if err != nil {
 		errMsg := "fail to execute task when restore rbac meta data"
-		log.Warn(errMsg, zap.Error(err))
+		ctxLog.Warn(errMsg, zap.Error(err))
 		return merr.StatusWithErrorCode(err, commonpb.ErrorCode_OperatePrivilegeFailure), nil
 	}
 
@@ -3198,7 +3197,7 @@ func (c *Core) OperatePrivilegeGroup(ctx context.Context, in *milvuspb.OperatePr
 	redoTask.AddSyncStep(NewSimpleStep("operate privilege group", func(ctx context.Context) ([]nestedStep, error) {
 		groups, err := c.meta.ListPrivilegeGroups(ctx)
 		if err != nil && !common.IsIgnorableError(err) {
-			log.Warn("fail to list privilege groups", zap.Error(err))
+			ctxLog.Warn("fail to list privilege groups", zap.Error(err))
 			return nil, err
 		}
 		currGroups := lo.SliceToMap(groups, func(group *milvuspb.PrivilegeGroupInfo) (string, []*milvuspb.PrivilegeEntity) {
@@ -3288,7 +3287,7 @@ func (c *Core) OperatePrivilegeGroup(ctx context.Context, in *milvuspb.OperatePr
 				OpType: opType,
 				OpKey:  funcutil.PolicyForPrivileges(rolesToRevoke),
 			}); err != nil {
-				log.Warn("fail to refresh policy info cache for revoke privileges in operate privilege group", zap.Any("in", in), zap.Error(err))
+				ctxLog.Warn("fail to refresh policy info cache for revoke privileges in operate privilege group", zap.Any("in", in), zap.Error(err))
 				return nil, err
 			}
 		}
@@ -3299,7 +3298,7 @@ func (c *Core) OperatePrivilegeGroup(ctx context.Context, in *milvuspb.OperatePr
 				OpType: opType,
 				OpKey:  funcutil.PolicyForPrivileges(rolesToGrant),
 			}); err != nil {
-				log.Warn("fail to refresh policy info cache for grants privilege in operate privilege group", zap.Any("in", in), zap.Error(err))
+				ctxLog.Warn("fail to refresh policy info cache for grants privilege in operate privilege group", zap.Any("in", in), zap.Error(err))
 				return nil, err
 			}
 		}
@@ -3309,7 +3308,7 @@ func (c *Core) OperatePrivilegeGroup(ctx context.Context, in *milvuspb.OperatePr
 	redoTask.AddSyncStep(NewSimpleStep("operate privilege group meta data", func(ctx context.Context) ([]nestedStep, error) {
 		err := c.meta.OperatePrivilegeGroup(ctx, in.GroupName, in.Privileges, in.Type)
 		if err != nil && !common.IsIgnorableError(err) {
-			log.Warn("fail to operate privilege group", zap.Error(err))
+			ctxLog.Warn("fail to operate privilege group", zap.Error(err))
 		}
 		return nil, err
 	}))
diff --git a/internal/rootcoord/show_collection_task.go b/internal/rootcoord/show_collection_task.go
index f5a3fa5eeb725..b0f40be928d61 100644
--- a/internal/rootcoord/show_collection_task.go
+++ b/internal/rootcoord/show_collection_task.go
@@ -60,7 +60,7 @@ func (t *showCollectionTask) Execute(ctx context.Context) error {
 		curUser, err := contextutil.GetCurUserFromContext(ctx)
 		if err != nil || curUser == util.UserRoot {
 			if err != nil {
-				log.Warn("get current user from context failed", zap.Error(err))
+				log.Ctx(ctx).Warn("get current user from context failed", zap.Error(err))
 			}
 			privilegeColls.Insert(util.AnyWord)
 			return privilegeColls, nil
diff --git a/internal/rootcoord/step.go b/internal/rootcoord/step.go
index 9b18df3a3c38e..bf5c9f7acf166 100644
--- a/internal/rootcoord/step.go
+++ b/internal/rootcoord/step.go
@@ -21,6 +21,8 @@ import (
 	"fmt"
 	"time"
 
+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
@@ -28,6 +30,7 @@ import (
 	"github.com/milvus-io/milvus/internal/metastore/model"
 	pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
 	"github.com/milvus-io/milvus/internal/util/proxyutil"
+	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/streaming/util/message"
 	"github.com/milvus-io/milvus/pkg/util/commonpbutil"
 )
@@ -297,6 +300,7 @@ type releaseCollectionStep struct {
 
 func (s *releaseCollectionStep) Execute(ctx context.Context) ([]nestedStep, error) {
 	err := s.core.broker.ReleaseCollection(ctx, s.collectionID)
+	log.Ctx(ctx).Info("release collection done", zap.Int64("collectionID", s.collectionID))
 	return nil, err
 }
 
diff --git a/internal/rootcoord/step_executor.go b/internal/rootcoord/step_executor.go
index 5e63faadd86b5..bbadf6252eee8 100644
--- a/internal/rootcoord/step_executor.go
+++ b/internal/rootcoord/step_executor.go
@@ -65,14 +65,14 @@ func (s *stepStack) Execute(ctx context.Context) *stepStack {
 
 		if !retry.IsRecoverable(err) {
 			if !skipLog {
-				log.Warn("failed to execute step, not able to reschedule", zap.Error(err), zap.String("step", todo.Desc()))
+				log.Ctx(ctx).Warn("failed to execute step, not able to reschedule", zap.Error(err), zap.String("step", todo.Desc()))
 			}
 			return nil
 		}
 		if err != nil {
 			s.steps = nil // let's can be collected.
 			if !skipLog {
-				log.Warn("failed to execute step, wait for reschedule", zap.Error(err), zap.String("step", todo.Desc()))
+				log.Ctx(ctx).Warn("failed to execute step, wait for reschedule", zap.Error(err), zap.String("step", todo.Desc()))
 			}
 			return &stepStack{steps: steps}
 		}
diff --git a/internal/rootcoord/timeticksync.go b/internal/rootcoord/timeticksync.go
index 7dc0f81fc77e3..1b4ea74539d13 100644
--- a/internal/rootcoord/timeticksync.go
+++ b/internal/rootcoord/timeticksync.go
@@ -124,7 +124,7 @@ func newTimeTickSync(ctx context.Context, sourceID int64, factory msgstream.Fact
 	// recover physical channels for all collections
 	for collID, chanNames := range chanMap {
 		dmlChannels.addChannels(chanNames...)
-		log.Info("recover physical channels", zap.Int64("collectionID", collID), zap.Strings("physical channels", chanNames))
+		log.Ctx(ctx).Info("recover physical channels", zap.Int64("collectionID", collID), zap.Strings("physical channels", chanNames))
 	}
 
 	return &timetickSync{
diff --git a/internal/rootcoord/undo.go b/internal/rootcoord/undo.go
index 29e9b7dfac05b..f41e00394b292 100644
--- a/internal/rootcoord/undo.go
+++ b/internal/rootcoord/undo.go
@@ -52,7 +52,7 @@ func (b *baseUndoTask) Execute(ctx context.Context) error {
 		todoStep := b.todoStep[i]
 		// no children step in normal case.
 		if _, err := todoStep.Execute(ctx); err != nil {
-			log.Warn("failed to execute step, trying to undo", zap.Error(err), zap.String("desc", todoStep.Desc()))
+			log.Ctx(ctx).Warn("failed to execute step, trying to undo", zap.Error(err), zap.String("desc", todoStep.Desc()))
 			undoSteps := b.undoStep[:i]
 			b.undoStep = nil // let baseUndoTask can be collected.
 			go b.stepExecutor.AddSteps(&stepStack{undoSteps})
diff --git a/internal/util/sessionutil/session_util.go b/internal/util/sessionutil/session_util.go
index 5358fc6955db4..8603bfd25ca10 100644
--- a/internal/util/sessionutil/session_util.go
+++ b/internal/util/sessionutil/session_util.go
@@ -231,7 +231,7 @@ func NewSession(ctx context.Context, opts ...SessionOption) *Session {
 func NewSessionWithEtcd(ctx context.Context, metaRoot string, client *clientv3.Client, opts ...SessionOption) *Session {
 	hostName, hostNameErr := os.Hostname()
 	if hostNameErr != nil {
-		log.Error("get host name fail", zap.Error(hostNameErr))
+		log.Ctx(ctx).Error("get host name fail", zap.Error(hostNameErr))
 	}
 
 	session := &Session{
@@ -260,7 +260,7 @@ func NewSessionWithEtcd(ctx context.Context, metaRoot string, client *clientv3.C
 	session.UpdateRegistered(false)
 
 	connectEtcdFn := func() error {
-		log.Debug("Session try to connect to etcd")
+		log.Ctx(ctx).Debug("Session try to connect to etcd")
 		ctx2, cancel2 := context.WithTimeout(session.ctx, 5*time.Second)
 		defer cancel2()
 		if _, err := client.Get(ctx2, "health"); err != nil {
@@ -271,11 +271,11 @@ func NewSessionWithEtcd(ctx context.Context, metaRoot string, client *clientv3.C
 	}
 	err := retry.Do(ctx, connectEtcdFn, retry.Attempts(100))
 	if err != nil {
-		log.Warn("failed to initialize session",
+		log.Ctx(ctx).Warn("failed to initialize session",
 			zap.Error(err))
 		return nil
 	}
-	log.Debug("Session connect to etcd success")
+	log.Ctx(ctx).Debug("Session connect to etcd success")
 
 	return session
 }