diff --git a/cmd/tools/migration/mmap/tool/main.go b/cmd/tools/migration/mmap/tool/main.go index 8975ffe59ef54..a9957a6ea4a49 100644 --- a/cmd/tools/migration/mmap/tool/main.go +++ b/cmd/tools/migration/mmap/tool/main.go @@ -134,7 +134,7 @@ func prepareRootCoordMeta(ctx context.Context, allocator tso.Allocator) rootcoor if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, kvmetestore.SnapshotsSep, paramtable.Get().EtcdCfg.MetaRootPath.GetValue(), kvmetestore.SnapshotPrefix); err != nil { panic(err) } - catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} + catalog = kvmetestore.NewCatalog(metaKV, ss) case util.MetaStoreTypeTiKV: log.Info("Using tikv as meta storage.") var metaKV kv.MetaKv @@ -148,7 +148,7 @@ func prepareRootCoordMeta(ctx context.Context, allocator tso.Allocator) rootcoor if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, kvmetestore.SnapshotsSep, paramtable.Get().TiKVCfg.MetaRootPath.GetValue(), kvmetestore.SnapshotPrefix); err != nil { panic(err) } - catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} + catalog = kvmetestore.NewCatalog(metaKV, ss) default: panic(fmt.Sprintf("MetaStoreType %s not supported", paramtable.Get().MetaStoreCfg.MetaStoreType.GetValue())) } diff --git a/internal/datacoord/broker/coordinator_broker.go b/internal/datacoord/broker/coordinator_broker.go index 7f079be5f631a..5536618e58fb4 100644 --- a/internal/datacoord/broker/coordinator_broker.go +++ b/internal/datacoord/broker/coordinator_broker.go @@ -25,6 +25,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" + "github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/internal/types" "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/util/commonpbutil" @@ -37,6 +38,7 @@ type Broker interface { DescribeCollectionInternal(ctx context.Context, collectionID int64) (*milvuspb.DescribeCollectionResponse, error) ShowPartitionsInternal(ctx context.Context, collectionID int64) ([]int64, error) ShowCollections(ctx context.Context, dbName string) (*milvuspb.ShowCollectionsResponse, error) + ShowCollectionsInternal(ctx context.Context) (*rootcoordpb.ShowCollectionsInternalResponse, error) ListDatabases(ctx context.Context) (*milvuspb.ListDatabasesResponse, error) HasCollection(ctx context.Context, collectionID int64) (bool, error) } @@ -116,6 +118,23 @@ func (b *coordinatorBroker) ShowCollections(ctx context.Context, dbName string) return resp, nil } +func (b *coordinatorBroker) ShowCollectionsInternal(ctx context.Context) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + ctx, cancel := context.WithTimeout(ctx, paramtable.Get().QueryCoordCfg.BrokerTimeout.GetAsDuration(time.Millisecond)) + defer cancel() + resp, err := b.rootCoord.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{ + Base: commonpbutil.NewMsgBase( + commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections), + ), + }) + + if err = merr.CheckRPCCall(resp, err); err != nil { + log.Warn("ShowCollectionsInternal failed", zap.Error(err)) + return nil, err + } + + return resp, nil +} + func (b *coordinatorBroker) ListDatabases(ctx context.Context) (*milvuspb.ListDatabasesResponse, error) { ctx, cancel := context.WithTimeout(ctx, paramtable.Get().QueryCoordCfg.BrokerTimeout.GetAsDuration(time.Millisecond)) defer cancel() diff --git a/internal/datacoord/broker/mock_coordinator_broker.go b/internal/datacoord/broker/mock_coordinator_broker.go index bdec77b28c5c4..47df231613150 100644 --- 
a/internal/datacoord/broker/mock_coordinator_broker.go +++ b/internal/datacoord/broker/mock_coordinator_broker.go @@ -7,6 +7,8 @@ import ( milvuspb "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" mock "github.com/stretchr/testify/mock" + + rootcoordpb "github.com/milvus-io/milvus/internal/proto/rootcoordpb" ) // MockBroker is an autogenerated mock type for the Broker type @@ -255,6 +257,64 @@ func (_c *MockBroker_ShowCollections_Call) RunAndReturn(run func(context.Context return _c } +// ShowCollectionsInternal provides a mock function with given fields: ctx +func (_m *MockBroker) ShowCollectionsInternal(ctx context.Context) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ShowCollectionsInternal") + } + + var r0 *rootcoordpb.ShowCollectionsInternalResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*rootcoordpb.ShowCollectionsInternalResponse, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *rootcoordpb.ShowCollectionsInternalResponse); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rootcoordpb.ShowCollectionsInternalResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBroker_ShowCollectionsInternal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShowCollectionsInternal' +type MockBroker_ShowCollectionsInternal_Call struct { + *mock.Call +} + +// ShowCollectionsInternal is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockBroker_Expecter) ShowCollectionsInternal(ctx interface{}) *MockBroker_ShowCollectionsInternal_Call { + return &MockBroker_ShowCollectionsInternal_Call{Call: _e.mock.On("ShowCollectionsInternal", ctx)} +} + +func (_c *MockBroker_ShowCollectionsInternal_Call) Run(run func(ctx context.Context)) *MockBroker_ShowCollectionsInternal_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockBroker_ShowCollectionsInternal_Call) Return(_a0 *rootcoordpb.ShowCollectionsInternalResponse, _a1 error) *MockBroker_ShowCollectionsInternal_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBroker_ShowCollectionsInternal_Call) RunAndReturn(run func(context.Context) (*rootcoordpb.ShowCollectionsInternalResponse, error)) *MockBroker_ShowCollectionsInternal_Call { + _c.Call.Return(run) + return _c +} + // ShowPartitionsInternal provides a mock function with given fields: ctx, collectionID func (_m *MockBroker) ShowPartitionsInternal(ctx context.Context, collectionID int64) ([]int64, error) { ret := _m.Called(ctx, collectionID) diff --git a/internal/datacoord/compaction_task_clustering_test.go b/internal/datacoord/compaction_task_clustering_test.go index 9c580e0fc2ab1..d8d3fdde09010 100644 --- a/internal/datacoord/compaction_task_clustering_test.go +++ b/internal/datacoord/compaction_task_clustering_test.go @@ -31,6 +31,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus/internal/datacoord/allocator" + "github.com/milvus-io/milvus/internal/datacoord/broker" "github.com/milvus-io/milvus/internal/datacoord/session" "github.com/milvus-io/milvus/internal/metastore/kv/datacoord" "github.com/milvus-io/milvus/internal/metastore/model" @@ -61,7 +62,9 @@ func (s 
*ClusteringCompactionTaskSuite) SetupTest() { ctx := context.Background() cm := storage.NewLocalChunkManager(storage.RootPath("")) catalog := datacoord.NewCatalog(NewMetaMemoryKV(), "", "") - meta, err := newMeta(ctx, catalog, cm) + broker := broker.NewMockBroker(s.T()) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(ctx, catalog, cm, broker) s.NoError(err) s.meta = meta diff --git a/internal/datacoord/garbage_collector_test.go b/internal/datacoord/garbage_collector_test.go index c2527c0e3b87f..da35784944b4e 100644 --- a/internal/datacoord/garbage_collector_test.go +++ b/internal/datacoord/garbage_collector_test.go @@ -63,7 +63,7 @@ func Test_garbageCollector_basic(t *testing.T) { cli, _, _, _, _, err := initUtOSSEnv(bucketName, rootPath, 0) require.NoError(t, err) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) t.Run("normal gc", func(t *testing.T) { @@ -118,7 +118,7 @@ func Test_garbageCollector_scan(t *testing.T) { cli, inserts, stats, delta, others, err := initUtOSSEnv(bucketName, rootPath, 4) require.NoError(t, err) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) t.Run("key is reference", func(t *testing.T) { @@ -1602,7 +1602,7 @@ func (s *GarbageCollectorSuite) SetupTest() { s.cli, s.inserts, s.stats, s.delta, s.others, err = initUtOSSEnv(s.bucketName, s.rootPath, 4) s.Require().NoError(err) - s.meta, err = newMemoryMeta() + s.meta, err = newMemoryMeta(s.T()) s.Require().NoError(err) } diff --git a/internal/datacoord/import_checker_test.go b/internal/datacoord/import_checker_test.go index 3feea4e13fedf..980870836dfe6 100644 --- a/internal/datacoord/import_checker_test.go +++ b/internal/datacoord/import_checker_test.go @@ -52,7 +52,7 @@ func (s *ImportCheckerSuite) SetupTest() { catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil) catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil) catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil) - catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) @@ -68,10 +68,11 @@ func (s *ImportCheckerSuite) SetupTest() { s.NoError(err) s.imeta = imeta - meta, err := newMeta(context.TODO(), catalog, nil) - s.NoError(err) - broker := broker2.NewMockBroker(s.T()) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + + meta, err := newMeta(context.TODO(), catalog, nil, broker) + s.NoError(err) sjm := NewMockStatsJobManager(s.T()) diff --git a/internal/datacoord/import_scheduler_test.go b/internal/datacoord/import_scheduler_test.go index d6f6ad3c4debc..983490e82410a 100644 --- a/internal/datacoord/import_scheduler_test.go +++ b/internal/datacoord/import_scheduler_test.go @@ -28,6 +28,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus/internal/datacoord/allocator" + "github.com/milvus-io/milvus/internal/datacoord/broker" "github.com/milvus-io/milvus/internal/datacoord/session" "github.com/milvus-io/milvus/internal/metastore/mocks" "github.com/milvus-io/milvus/internal/proto/datapb" @@ -56,7 +57,7 @@ func (s *ImportSchedulerSuite) SetupTest() { 
s.catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil) s.catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil) s.catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil) - s.catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + s.catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) s.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) s.catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) s.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) @@ -67,7 +68,9 @@ func (s *ImportSchedulerSuite) SetupTest() { s.cluster = NewMockCluster(s.T()) s.alloc = allocator.NewMockAllocator(s.T()) - s.meta, err = newMeta(context.TODO(), s.catalog, nil) + broker := broker.NewMockBroker(s.T()) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + s.meta, err = newMeta(context.TODO(), s.catalog, nil, broker) s.NoError(err) s.meta.AddCollection(&collectionInfo{ ID: s.collectionID, diff --git a/internal/datacoord/import_util_test.go b/internal/datacoord/import_util_test.go index ddfc21e19ef1e..8c66f5d7c0544 100644 --- a/internal/datacoord/import_util_test.go +++ b/internal/datacoord/import_util_test.go @@ -32,6 +32,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus/internal/datacoord/allocator" + "github.com/milvus-io/milvus/internal/datacoord/broker" "github.com/milvus-io/milvus/internal/json" "github.com/milvus-io/milvus/internal/metastore/mocks" mocks2 "github.com/milvus-io/milvus/internal/mocks" @@ -106,7 +107,7 @@ func TestImportUtil_NewImportTasks(t *testing.T) { alloc.EXPECT().AllocTimestamp(mock.Anything).Return(rand.Uint64(), nil) catalog := mocks.NewDataCoordCatalog(t) - catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) @@ -116,7 +117,9 @@ func TestImportUtil_NewImportTasks(t *testing.T) { catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil) catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil) - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) tasks, err := NewImportTasks(fileGroups, job, alloc, meta) @@ -158,7 +161,7 @@ func TestImportUtil_AssembleRequest(t *testing.T) { } catalog := mocks.NewDataCoordCatalog(t) - catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) @@ -175,7 +178,9 @@ func TestImportUtil_AssembleRequest(t *testing.T) { }) alloc.EXPECT().AllocTimestamp(mock.Anything).Return(800, nil) - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) segment := &SegmentInfo{ SegmentInfo: &datapb.SegmentInfo{ID: 5, IsImporting: true}, @@ -244,7 
+249,7 @@ func TestImportUtil_CheckDiskQuota(t *testing.T) { catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil) catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) - catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) catalog.EXPECT().AddSegment(mock.Anything, mock.Anything).Return(nil) catalog.EXPECT().ListAnalyzeTasks(mock.Anything).Return(nil, nil) @@ -255,7 +260,9 @@ func TestImportUtil_CheckDiskQuota(t *testing.T) { imeta, err := NewImportMeta(context.TODO(), catalog) assert.NoError(t, err) - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) job := &importJob{ @@ -424,7 +431,7 @@ func TestImportUtil_GetImportProgress(t *testing.T) { catalog.EXPECT().ListImportJobs(mock.Anything).Return(nil, nil) catalog.EXPECT().ListPreImportTasks(mock.Anything).Return(nil, nil) catalog.EXPECT().ListImportTasks(mock.Anything).Return(nil, nil) - catalog.EXPECT().ListSegments(mock.Anything).Return(nil, nil) + catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil) catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, nil) catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, nil) @@ -441,7 +448,9 @@ func TestImportUtil_GetImportProgress(t *testing.T) { imeta, err := NewImportMeta(context.TODO(), catalog) assert.NoError(t, err) - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) file1 := &internalpb.ImportFile{ diff --git a/internal/datacoord/meta.go b/internal/datacoord/meta.go index 20f72ec454562..c2452a5e3d7a6 100644 --- a/internal/datacoord/meta.go +++ b/internal/datacoord/meta.go @@ -43,6 +43,7 @@ import ( "github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/metrics" + "github.com/milvus-io/milvus/pkg/util/conc" "github.com/milvus-io/milvus/pkg/util/funcutil" "github.com/milvus-io/milvus/pkg/util/lock" "github.com/milvus-io/milvus/pkg/util/merr" @@ -139,7 +140,7 @@ type collectionInfo struct { } // NewMeta creates meta from provided `kv.TxnKV` -func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManager storage.ChunkManager) (*meta, error) { +func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManager storage.ChunkManager, broker broker.Broker) (*meta, error) { im, err := newIndexMeta(ctx, catalog) if err != nil { return nil, err @@ -177,7 +178,7 @@ func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManag compactionTaskMeta: ctm, statsTaskMeta: stm, } - err = mt.reloadFromKV() + err = mt.reloadFromKV(broker) if err != nil { return nil, err } @@ -185,39 +186,73 @@ func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManag } // reloadFromKV loads meta from KV storage -func (m *meta) reloadFromKV() error { +func (m *meta) reloadFromKV(broker broker.Broker) error { record := timerecord.NewTimeRecorder("datacoord") 
- segments, err := m.catalog.ListSegments(m.ctx) + + resp, err := broker.ShowCollectionsInternal(m.ctx) if err != nil { return err } - metrics.DataCoordNumCollections.WithLabelValues().Set(0) - metrics.DataCoordNumSegments.Reset() - numStoredRows := int64(0) - for _, segment := range segments { - // segments from catalog.ListSegments will not have logPath - m.segments.SetSegment(segment.ID, NewSegmentInfo(segment)) - metrics.DataCoordNumSegments.WithLabelValues(segment.GetState().String(), segment.GetLevel().String(), getSortStatus(segment.GetIsSorted())).Inc() - if segment.State == commonpb.SegmentState_Flushed { - numStoredRows += segment.NumOfRows - - insertFileNum := 0 - for _, fieldBinlog := range segment.GetBinlogs() { - insertFileNum += len(fieldBinlog.GetBinlogs()) - } - metrics.FlushedSegmentFileNum.WithLabelValues(metrics.InsertFileLabel).Observe(float64(insertFileNum)) + log.Info("datacoord show collections done", zap.Duration("dur", record.RecordSpan())) + + collectionIDs := make([]int64, 0, 4096) + for _, collections := range resp.GetDbCollections() { + collectionIDs = append(collectionIDs, collections.GetCollectionIDs()...) + } - statFileNum := 0 - for _, fieldBinlog := range segment.GetStatslogs() { - statFileNum += len(fieldBinlog.GetBinlogs()) + pool := conc.NewPool[any](paramtable.Get().MetaStoreCfg.ReadConcurrency.GetAsInt()) + futures := make([]*conc.Future[any], 0, len(collectionIDs)) + collectionSegments := make([][]*datapb.SegmentInfo, len(collectionIDs)) + for i, collectionID := range collectionIDs { + i := i + collectionID := collectionID + futures = append(futures, pool.Submit(func() (any, error) { + segments, err := m.catalog.ListSegments(m.ctx, collectionID) + if err != nil { + return nil, err } - metrics.FlushedSegmentFileNum.WithLabelValues(metrics.StatFileLabel).Observe(float64(statFileNum)) + collectionSegments[i] = segments + return nil, nil + })) + } + err = conc.AwaitAll(futures...) 
+ if err != nil { + return err + } - deleteFileNum := 0 - for _, filedBinlog := range segment.GetDeltalogs() { - deleteFileNum += len(filedBinlog.GetBinlogs()) + log.Info("datacoord show segments done", zap.Duration("dur", record.RecordSpan())) + + metrics.DataCoordNumCollections.WithLabelValues().Set(0) + metrics.DataCoordNumSegments.Reset() + numStoredRows := int64(0) + numSegments := 0 + for _, segments := range collectionSegments { + numSegments += len(segments) + for _, segment := range segments { + // segments from catalog.ListSegments will not have logPath + m.segments.SetSegment(segment.ID, NewSegmentInfo(segment)) + metrics.DataCoordNumSegments.WithLabelValues(segment.GetState().String(), segment.GetLevel().String(), getSortStatus(segment.GetIsSorted())).Inc() + if segment.State == commonpb.SegmentState_Flushed { + numStoredRows += segment.NumOfRows + + insertFileNum := 0 + for _, fieldBinlog := range segment.GetBinlogs() { + insertFileNum += len(fieldBinlog.GetBinlogs()) + } + metrics.FlushedSegmentFileNum.WithLabelValues(metrics.InsertFileLabel).Observe(float64(insertFileNum)) + + statFileNum := 0 + for _, fieldBinlog := range segment.GetStatslogs() { + statFileNum += len(fieldBinlog.GetBinlogs()) + } + metrics.FlushedSegmentFileNum.WithLabelValues(metrics.StatFileLabel).Observe(float64(statFileNum)) + + deleteFileNum := 0 + for _, filedBinlog := range segment.GetDeltalogs() { + deleteFileNum += len(filedBinlog.GetBinlogs()) + } + metrics.FlushedSegmentFileNum.WithLabelValues(metrics.DeleteFileLabel).Observe(float64(deleteFileNum)) } - metrics.FlushedSegmentFileNum.WithLabelValues(metrics.DeleteFileLabel).Observe(float64(deleteFileNum)) } } @@ -234,7 +269,7 @@ func (m *meta) reloadFromKV() error { Set(float64(ts.Unix())) } - log.Info("DataCoord meta reloadFromKV done", zap.Duration("duration", record.ElapseSpan())) + log.Info("DataCoord meta reloadFromKV done", zap.Int("numSegments", numSegments), zap.Duration("duration", record.ElapseSpan())) return nil } diff --git a/internal/datacoord/meta_test.go b/internal/datacoord/meta_test.go index 8872e2ff839cc..53766ee72c1b9 100644 --- a/internal/datacoord/meta_test.go +++ b/internal/datacoord/meta_test.go @@ -18,6 +18,7 @@ package datacoord import ( "context" + "math/rand" "sync/atomic" "testing" @@ -40,6 +41,7 @@ import ( "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/mocks" "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/kv" "github.com/milvus-io/milvus/pkg/metrics" @@ -70,9 +72,20 @@ func (suite *MetaReloadSuite) resetMock() { func (suite *MetaReloadSuite) TestReloadFromKV() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + suite.Run("ListSegments_fail", func() { defer suite.resetMock() - suite.catalog.EXPECT().ListSegments(mock.Anything).Return(nil, errors.New("mock")) + brk := broker.NewMockBroker(suite.T()) + brk.EXPECT().ShowCollectionsInternal(mock.Anything).Return(&rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Success(), + DbCollections: []*rootcoordpb.DBCollections{ + { + DbName: "db_1", + CollectionIDs: []int64{100}, + }, + }, + }, nil) + suite.catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, errors.New("mock")) suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil) suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil) 
suite.catalog.EXPECT().ListAnalyzeTasks(mock.Anything).Return(nil, nil) @@ -80,14 +93,15 @@ func (suite *MetaReloadSuite) TestReloadFromKV() { suite.catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil) suite.catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil) - _, err := newMeta(ctx, suite.catalog, nil) + _, err := newMeta(ctx, suite.catalog, nil, brk) suite.Error(err) }) suite.Run("ListChannelCheckpoint_fail", func() { defer suite.resetMock() - - suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{}, nil) + brk := broker.NewMockBroker(suite.T()) + brk.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + suite.catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return([]*datapb.SegmentInfo{}, nil) suite.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, errors.New("mock")) suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil) suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil) @@ -96,19 +110,29 @@ func (suite *MetaReloadSuite) TestReloadFromKV() { suite.catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil) suite.catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil) - _, err := newMeta(ctx, suite.catalog, nil) + _, err := newMeta(ctx, suite.catalog, nil, brk) suite.Error(err) }) suite.Run("ok", func() { defer suite.resetMock() + brk := broker.NewMockBroker(suite.T()) + brk.EXPECT().ShowCollectionsInternal(mock.Anything).Return(&rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Success(), + DbCollections: []*rootcoordpb.DBCollections{ + { + DbName: "db_1", + CollectionIDs: []int64{1}, + }, + }, + }, nil) suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil) suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil) suite.catalog.EXPECT().ListAnalyzeTasks(mock.Anything).Return(nil, nil) suite.catalog.EXPECT().ListCompactionTask(mock.Anything).Return(nil, nil) suite.catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil) suite.catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil) - suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{ + suite.catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).Return([]*datapb.SegmentInfo{ { ID: 1, CollectionID: 1, @@ -124,11 +148,56 @@ func (suite *MetaReloadSuite) TestReloadFromKV() { }, }, nil) - _, err := newMeta(ctx, suite.catalog, nil) + _, err := newMeta(ctx, suite.catalog, nil, brk) suite.NoError(err) suite.MetricsEqual(metrics.DataCoordNumSegments.WithLabelValues(metrics.FlushedSegmentLabel, datapb.SegmentLevel_Legacy.String(), "unsorted"), 1) }) + + suite.Run("test list segments", func() { + defer suite.resetMock() + brk := broker.NewMockBroker(suite.T()) + brk.EXPECT().ShowCollectionsInternal(mock.Anything).Return(&rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Success(), + DbCollections: []*rootcoordpb.DBCollections{ + { + DbName: "db_1", + CollectionIDs: []int64{100, 101, 102}, + }, + { + DbName: "db_2", + CollectionIDs: []int64{200, 201, 202}, + }, + }, + }, nil) + + suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil) + suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil) + suite.catalog.EXPECT().ListAnalyzeTasks(mock.Anything).Return(nil, nil) + suite.catalog.EXPECT().ListCompactionTask(mock.Anything).Return(nil, nil) + 
suite.catalog.EXPECT().ListPartitionStatsInfos(mock.Anything).Return(nil, nil) + suite.catalog.EXPECT().ListStatsTasks(mock.Anything).Return(nil, nil) + suite.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, nil) + + suite.catalog.EXPECT().ListSegments(mock.Anything, mock.Anything).RunAndReturn( + func(ctx context.Context, collectionID int64) ([]*datapb.SegmentInfo, error) { + return []*datapb.SegmentInfo{ + { + ID: rand.Int63(), + CollectionID: collectionID, + State: commonpb.SegmentState_Flushed, + }, + }, nil + }) + + meta, err := newMeta(ctx, suite.catalog, nil, brk) + suite.NoError(err) + for _, collectionID := range []int64{100, 101, 102, 200, 201, 202} { + segments := meta.GetSegmentsOfCollection(ctx, collectionID) + suite.Len(segments, 1) + suite.Equal(collectionID, segments[0].GetCollectionID()) + } + }) } type MetaBasicSuite struct { @@ -150,7 +219,7 @@ func (suite *MetaBasicSuite) SetupTest() { suite.partIDs = []int64{100, 101} suite.channelName = "c1" - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(suite.T()) suite.Require().NoError(err) suite.meta = meta @@ -456,7 +525,7 @@ func TestMeta_Basic(t *testing.T) { const channelName = "c1" // mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) testSchema := newTestSchema() @@ -546,7 +615,9 @@ func TestMeta_Basic(t *testing.T) { metakv.EXPECT().WalkWithPrefix(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() metakv.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything).Return(nil, nil, nil).Maybe() catalog := datacoord.NewCatalog(metakv, "", "") - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) err = meta.AddSegment(context.TODO(), NewSegmentInfo(&datapb.SegmentInfo{})) @@ -561,7 +632,7 @@ func TestMeta_Basic(t *testing.T) { metakv2.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything).Return(nil, nil, nil).Maybe() metakv2.EXPECT().MultiSaveAndRemoveWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(errors.New("failed")) catalog = datacoord.NewCatalog(metakv2, "", "") - meta, err = newMeta(context.TODO(), catalog, nil) + meta, err = newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) // nil, since no segment yet err = meta.DropSegment(context.TODO(), 0) @@ -574,7 +645,7 @@ func TestMeta_Basic(t *testing.T) { assert.Error(t, err) catalog = datacoord.NewCatalog(metakv, "", "") - meta, err = newMeta(context.TODO(), catalog, nil) + meta, err = newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) assert.NotNil(t, meta) }) @@ -694,7 +765,7 @@ func TestMeta_Basic(t *testing.T) { }) t.Run("Test AddAllocation", func(t *testing.T) { - meta, _ := newMemoryMeta() + meta, _ := newMemoryMeta(t) err := meta.AddAllocation(1, &Allocation{ SegmentID: 1, NumOfRows: 1, @@ -705,7 +776,7 @@ func TestMeta_Basic(t *testing.T) { } func TestGetUnFlushedSegments(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) s1 := &datapb.SegmentInfo{ ID: 0, @@ -734,7 +805,7 @@ func TestGetUnFlushedSegments(t *testing.T) { func TestUpdateSegmentsInfo(t *testing.T) { t.Run("normal", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) segment1 := NewSegmentInfo(&datapb.SegmentInfo{ @@ -785,7 +856,7 @@ func 
TestUpdateSegmentsInfo(t *testing.T) { }) t.Run("update compacted segment", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) // segment not found @@ -811,7 +882,7 @@ func TestUpdateSegmentsInfo(t *testing.T) { assert.NoError(t, err) }) t.Run("update non-existed segment", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) err = meta.UpdateSegmentsInfo( @@ -873,7 +944,7 @@ func TestUpdateSegmentsInfo(t *testing.T) { }) t.Run("update empty segment into flush", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) meta.AddSegment(context.Background(), &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing}}) err = meta.UpdateSegmentsInfo( @@ -885,7 +956,7 @@ func TestUpdateSegmentsInfo(t *testing.T) { }) t.Run("update checkpoints and start position of non existed segment", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) segment1 := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing}} @@ -908,7 +979,9 @@ func TestUpdateSegmentsInfo(t *testing.T) { metakv.EXPECT().WalkWithPrefix(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() metakv.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything).Return(nil, nil, nil).Maybe() catalog := datacoord.NewCatalog(metakv, "", "") - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) segmentInfo := &SegmentInfo{ @@ -1201,7 +1274,7 @@ func TestChannelCP(t *testing.T) { } t.Run("UpdateChannelCheckpoint", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) // nil position @@ -1213,7 +1286,7 @@ func TestChannelCP(t *testing.T) { }) t.Run("UpdateChannelCheckpoints", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) assert.Equal(t, 0, len(meta.channelCPs.checkpoints)) @@ -1229,7 +1302,7 @@ func TestChannelCP(t *testing.T) { }) t.Run("GetChannelCheckpoint", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) position := meta.GetChannelCheckpoint(mockVChannel) @@ -1244,7 +1317,7 @@ func TestChannelCP(t *testing.T) { }) t.Run("DropChannelCheckpoint", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) err = meta.DropChannelCheckpoint(mockVChannel) diff --git a/internal/datacoord/mock_test.go b/internal/datacoord/mock_test.go index 9f64dbb7223af..96797452bcbb7 100644 --- a/internal/datacoord/mock_test.go +++ b/internal/datacoord/mock_test.go @@ -31,6 +31,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus/internal/datacoord/allocator" + "github.com/milvus-io/milvus/internal/datacoord/broker" memkv "github.com/milvus-io/milvus/internal/kv/mem" "github.com/milvus-io/milvus/internal/metastore/kv/datacoord" "github.com/milvus-io/milvus/internal/proto/datapb" @@ -88,9 +89,11 @@ func (mm *metaMemoryKV) CompareVersionAndSwap(ctx context.Context, key string, v panic("implement me") } -func newMemoryMeta() (*meta, error) { +func newMemoryMeta(t 
*testing.T) (*meta, error) { catalog := datacoord.NewCatalog(NewMetaMemoryKV(), "", "") - return newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + return newMeta(context.TODO(), catalog, nil, broker) } func newMockAllocator(t *testing.T) *allocator.MockAllocator { @@ -428,6 +431,12 @@ func (m *mockRootCoordClient) ShowCollections(ctx context.Context, req *milvuspb }, nil } +func (m *mockRootCoordClient) ShowCollectionsInternal(ctx context.Context, req *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Success(), + }, nil +} + func (m *mockRootCoordClient) CreateDatabase(ctx context.Context, in *milvuspb.CreateDatabaseRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { panic("not implemented") // TODO: Implement } diff --git a/internal/datacoord/segment_manager_test.go b/internal/datacoord/segment_manager_test.go index cd8c330eea853..cabd50f97a06e 100644 --- a/internal/datacoord/segment_manager_test.go +++ b/internal/datacoord/segment_manager_test.go @@ -30,6 +30,7 @@ import ( "github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus/internal/datacoord/allocator" + "github.com/milvus-io/milvus/internal/datacoord/broker" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" mockkv "github.com/milvus-io/milvus/internal/kv/mocks" "github.com/milvus-io/milvus/internal/metastore/kv/datacoord" @@ -42,7 +43,7 @@ func TestManagerOptions(t *testing.T) { // ctx := context.Background() paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) segmentManager, _ := newSegmentManager(meta, mockAllocator) t.Run("test with alloc helper", func(t *testing.T) { @@ -103,7 +104,7 @@ func TestAllocSegment(t *testing.T) { paramtable.Init() Params.Save(Params.DataCoordCfg.AllocLatestExpireAttempt.Key, "1") mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) segmentManager, _ := newSegmentManager(meta, mockAllocator) @@ -185,7 +186,9 @@ func TestLastExpireReset(t *testing.T) { metaKV := etcdkv.NewEtcdKV(etcdCli, rootPath) metaKV.RemoveWithPrefix(ctx, "") catalog := datacoord.NewCatalog(metaKV, "", "") - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.Nil(t, err) // add collection channelName := "c1" @@ -236,7 +239,7 @@ func TestLastExpireReset(t *testing.T) { newMetaKV := etcdkv.NewEtcdKV(newEtcdCli, rootPath) defer newMetaKV.RemoveWithPrefix(ctx, "") newCatalog := datacoord.NewCatalog(newMetaKV, "", "") - restartedMeta, err := newMeta(context.TODO(), newCatalog, nil) + restartedMeta, err := newMeta(context.TODO(), newCatalog, nil, broker) restartedMeta.AddCollection(&collectionInfo{ID: collID, Schema: schema}) assert.Nil(t, err) newSegmentManager, _ := newSegmentManager(restartedMeta, mockAllocator) @@ -265,7 +268,7 @@ func TestLoadSegmentsFromMeta(t *testing.T) { ctx := context.Background() paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() 
@@ -315,7 +318,7 @@ func TestLoadSegmentsFromMeta(t *testing.T) { func TestSaveSegmentsToMeta(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -337,7 +340,7 @@ func TestSaveSegmentsToMeta(t *testing.T) { func TestSaveSegmentsToMetaWithSpecificSegments(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -359,7 +362,7 @@ func TestSaveSegmentsToMetaWithSpecificSegments(t *testing.T) { func TestDropSegment(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -382,7 +385,7 @@ func TestDropSegment(t *testing.T) { func TestAllocRowsLargerThanOneSegment(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -404,7 +407,7 @@ func TestAllocRowsLargerThanOneSegment(t *testing.T) { func TestExpireAllocation(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -447,7 +450,7 @@ func TestGetFlushableSegments(t *testing.T) { t.Run("get flushable segments between small interval", func(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -493,7 +496,7 @@ func TestTryToSealSegment(t *testing.T) { t.Run("normal seal with segment policies", func(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -518,7 +521,7 @@ func TestTryToSealSegment(t *testing.T) { t.Run("normal seal with channel seal policies", func(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -543,7 +546,7 @@ func TestTryToSealSegment(t *testing.T) { t.Run("normal seal with both segment & channel seal policy", func(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -570,7 +573,7 @@ func TestTryToSealSegment(t *testing.T) { t.Run("test sealByMaxBinlogFileNumberPolicy", func(t *testing.T) { paramtable.Init() mockAllocator := newMockAllocator(t) - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) schema := newTestSchema() @@ -654,7 +657,9 @@ func TestTryToSealSegment(t *testing.T) { mockAllocator := newMockAllocator(t) memoryKV := NewMetaMemoryKV() catalog := datacoord.NewCatalog(memoryKV, "", "") - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) schema := newTestSchema() @@ -683,7 +688,9 @@ func TestTryToSealSegment(t *testing.T) { mockAllocator := newMockAllocator(t) memoryKV := NewMetaMemoryKV() catalog := datacoord.NewCatalog(memoryKV, 
"", "") - meta, err := newMeta(context.TODO(), catalog, nil) + broker := broker.NewMockBroker(t) + broker.EXPECT().ShowCollectionsInternal(mock.Anything).Return(nil, nil) + meta, err := newMeta(context.TODO(), catalog, nil, broker) assert.NoError(t, err) schema := newTestSchema() diff --git a/internal/datacoord/server.go b/internal/datacoord/server.go index 7256edc15044f..2d0bc9f8a04ec 100644 --- a/internal/datacoord/server.go +++ b/internal/datacoord/server.go @@ -51,6 +51,7 @@ import ( "github.com/milvus-io/milvus/internal/storage" streamingcoord "github.com/milvus-io/milvus/internal/streamingcoord/server" "github.com/milvus-io/milvus/internal/types" + "github.com/milvus-io/milvus/internal/util/componentutil" "github.com/milvus-io/milvus/internal/util/dependency" "github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/streamingutil" @@ -345,12 +346,20 @@ func (s *Server) RegisterStreamingCoordGRPCService(server *grpc.Server) { } func (s *Server) initDataCoord() error { - s.stateCode.Store(commonpb.StateCode_Initializing) - var err error - if err = s.initRootCoordClient(); err != nil { + // wait for master init or healthy + log.Info("DataCoord try to wait for RootCoord ready") + if err := s.initRootCoordClient(); err != nil { return err } log.Info("init rootcoord client done") + err := componentutil.WaitForComponentHealthy(s.ctx, s.rootCoordClient, "RootCoord", 1000000, time.Millisecond*200) + if err != nil { + log.Error("DataCoord wait for RootCoord ready failed", zap.Error(err)) + return err + } + log.Info("DataCoord report RootCoord ready") + + s.stateCode.Store(commonpb.StateCode_Initializing) s.broker = broker.NewCoordinatorBroker(s.rootCoordClient) s.allocator = allocator.NewRootCoordAllocator(s.rootCoordClient) @@ -685,7 +694,7 @@ func (s *Server) initMeta(chunkManager storage.ChunkManager) error { reloadEtcdFn := func() error { var err error catalog := datacoord.NewCatalog(s.kv, chunkManager.RootPath(), s.metaRootPath) - s.meta, err = newMeta(s.ctx, catalog, chunkManager) + s.meta, err = newMeta(s.ctx, catalog, chunkManager, s.broker) if err != nil { return err } diff --git a/internal/datacoord/server_test.go b/internal/datacoord/server_test.go index acfb03d8af543..df60852997e2b 100644 --- a/internal/datacoord/server_test.go +++ b/internal/datacoord/server_test.go @@ -2211,7 +2211,7 @@ func TestDataCoordServer_SetSegmentState(t *testing.T) { }) t.Run("dataCoord meta set state not exists", func(t *testing.T) { - meta, err := newMemoryMeta() + meta, err := newMemoryMeta(t) assert.NoError(t, err) svr := newTestServer(t, WithMeta(meta)) defer closeTestServer(t, svr) diff --git a/internal/distributed/rootcoord/client/client.go b/internal/distributed/rootcoord/client/client.go index 942f366990b3f..6dd0840f32c8d 100644 --- a/internal/distributed/rootcoord/client/client.go +++ b/internal/distributed/rootcoord/client/client.go @@ -229,6 +229,18 @@ func (c *Client) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectio }) } +// ShowCollectionsInternal returns all collections, including unhealthy ones. 
+func (c *Client) ShowCollectionsInternal(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + in = typeutil.Clone(in) + commonpbutil.UpdateMsgBase( + in.GetBase(), + commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())), + ) + return wrapGrpcCall(ctx, c, func(client rootcoordpb.RootCoordClient) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + return client.ShowCollectionsInternal(ctx, in) + }) +} + func (c *Client) AlterCollection(ctx context.Context, request *milvuspb.AlterCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { request = typeutil.Clone(request) commonpbutil.UpdateMsgBase( diff --git a/internal/distributed/rootcoord/client/client_test.go b/internal/distributed/rootcoord/client/client_test.go index b70883dcf742f..a94106a376509 100644 --- a/internal/distributed/rootcoord/client/client_test.go +++ b/internal/distributed/rootcoord/client/client_test.go @@ -104,6 +104,10 @@ func Test_NewClient(t *testing.T) { r, err := client.ShowCollections(ctx, nil) retCheck(retNotNil, r, err) } + { + r, err := client.ShowCollectionsInternal(ctx, nil) + retCheck(retNotNil, r, err) + } { r, err := client.CreatePartition(ctx, nil) retCheck(retNotNil, r, err) @@ -350,6 +354,10 @@ func Test_NewClient(t *testing.T) { rTimeout, err := client.ShowCollections(shortCtx, nil) retCheck(rTimeout, err) } + { + rTimeout, err := client.ShowCollectionsInternal(shortCtx, nil) + retCheck(rTimeout, err) + } { rTimeout, err := client.CreatePartition(shortCtx, nil) retCheck(rTimeout, err) diff --git a/internal/distributed/rootcoord/service.go b/internal/distributed/rootcoord/service.go index 028a9178e6608..a0e21bed48f2d 100644 --- a/internal/distributed/rootcoord/service.go +++ b/internal/distributed/rootcoord/service.go @@ -409,6 +409,11 @@ func (s *Server) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectio return s.rootCoord.ShowCollections(ctx, in) } +// ShowCollectionsInternal returns all collections, including unhealthy ones. 
+func (s *Server) ShowCollectionsInternal(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + return s.rootCoord.ShowCollectionsInternal(ctx, in) +} + // CreatePartition creates a partition in a collection func (s *Server) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) { return s.rootCoord.CreatePartition(ctx, in) diff --git a/internal/metastore/catalog.go b/internal/metastore/catalog.go index e036d0d0fcfd1..1d0c7bdf0c8ae 100644 --- a/internal/metastore/catalog.go +++ b/internal/metastore/catalog.go @@ -120,7 +120,7 @@ type BinlogsIncrement struct { //go:generate mockery --name=DataCoordCatalog --with-expecter type DataCoordCatalog interface { - ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) + ListSegments(ctx context.Context, collectionID int64) ([]*datapb.SegmentInfo, error) AddSegment(ctx context.Context, segment *datapb.SegmentInfo) error // TODO Remove this later, we should update flush segments info for each segment separately, so far we still need transaction AlterSegments(ctx context.Context, newSegments []*datapb.SegmentInfo, binlogs ...BinlogsIncrement) error @@ -186,7 +186,7 @@ type QueryCoordCatalog interface { SavePartition(ctx context.Context, info ...*querypb.PartitionLoadInfo) error SaveReplica(ctx context.Context, replicas ...*querypb.Replica) error GetCollections(ctx context.Context) ([]*querypb.CollectionLoadInfo, error) - GetPartitions(ctx context.Context) (map[int64][]*querypb.PartitionLoadInfo, error) + GetPartitions(ctx context.Context, collectionIDs []int64) (map[int64][]*querypb.PartitionLoadInfo, error) GetReplicas(ctx context.Context) ([]*querypb.Replica, error) ReleaseCollection(ctx context.Context, collection int64) error ReleasePartition(ctx context.Context, collection int64, partitions ...int64) error diff --git a/internal/metastore/kv/datacoord/kv_catalog.go b/internal/metastore/kv/datacoord/kv_catalog.go index 989fb3e2e0d04..736b671d54874 100644 --- a/internal/metastore/kv/datacoord/kv_catalog.go +++ b/internal/metastore/kv/datacoord/kv_catalog.go @@ -65,7 +65,7 @@ func NewCatalog(MetaKv kv.MetaKv, chunkManagerRootPath string, metaRootpath stri } } -func (kc *Catalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) { +func (kc *Catalog) ListSegments(ctx context.Context, collectionID int64) ([]*datapb.SegmentInfo, error) { group, _ := errgroup.WithContext(ctx) segments := make([]*datapb.SegmentInfo, 0) insertLogs := make(map[typeutil.UniqueID][]*datapb.FieldBinlog, 1) @@ -75,7 +75,7 @@ func (kc *Catalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, err executeFn := func(binlogType storage.BinlogType, result map[typeutil.UniqueID][]*datapb.FieldBinlog) { group.Go(func() error { - ret, err := kc.listBinlogs(ctx, binlogType) + ret, err := kc.listBinlogs(ctx, binlogType, collectionID) if err != nil { return err } @@ -91,7 +91,7 @@ func (kc *Catalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, err executeFn(storage.StatsBinlog, statsLogs) executeFn(storage.BM25Binlog, bm25Logs) group.Go(func() error { - ret, err := kc.listSegments(ctx) + ret, err := kc.listSegments(ctx, collectionID) if err != nil { return err } @@ -111,7 +111,7 @@ func (kc *Catalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, err return segments, nil } -func (kc *Catalog) listSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) { +func (kc *Catalog) listSegments(ctx context.Context, 
collectionID int64) ([]*datapb.SegmentInfo, error) { segments := make([]*datapb.SegmentInfo, 0) applyFn := func(key []byte, value []byte) error { @@ -136,7 +136,7 @@ func (kc *Catalog) listSegments(ctx context.Context) ([]*datapb.SegmentInfo, err return nil } - err := kc.MetaKv.WalkWithPrefix(ctx, SegmentPrefix+"/", kc.paginationSize, applyFn) + err := kc.MetaKv.WalkWithPrefix(ctx, buildCollectionPrefix(collectionID), kc.paginationSize, applyFn) if err != nil { return nil, err } @@ -144,45 +144,34 @@ func (kc *Catalog) listSegments(ctx context.Context) ([]*datapb.SegmentInfo, err return segments, nil } -func (kc *Catalog) parseBinlogKey(key string, prefixIdx int) (int64, int64, int64, error) { - remainedKey := key[prefixIdx:] - keyWordGroup := strings.Split(remainedKey, "/") +func (kc *Catalog) parseBinlogKey(key string) (int64, error) { + // by-dev/meta/datacoord-meta/binlog/454086059555817418/454086059555817543/454329387504816753/1 + // ---------------------------------|collectionID |partitionID |segmentID |fieldID + keyWordGroup := strings.Split(key, "/") if len(keyWordGroup) < 3 { - return 0, 0, 0, fmt.Errorf("parse key: %s failed, trimmed key:%s", key, remainedKey) + return 0, fmt.Errorf("parse key: %s failed, key:%s", key, key) } - - collectionID, err := strconv.ParseInt(keyWordGroup[0], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("parse key: %s failed, trimmed key:%s, %w", key, remainedKey, err) - } - - partitionID, err := strconv.ParseInt(keyWordGroup[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("parse key: %s failed, trimmed key:%s, %w", key, remainedKey, err) - } - - segmentID, err := strconv.ParseInt(keyWordGroup[2], 10, 64) + segmentID, err := strconv.ParseInt(keyWordGroup[len(keyWordGroup)-2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("parse key: %s failed, trimmed key:%s, %w", key, remainedKey, err) + return 0, fmt.Errorf("parse key failed, key:%s, %w", key, err) } - - return collectionID, partitionID, segmentID, nil + return segmentID, nil } -func (kc *Catalog) listBinlogs(ctx context.Context, binlogType storage.BinlogType) (map[typeutil.UniqueID][]*datapb.FieldBinlog, error) { +func (kc *Catalog) listBinlogs(ctx context.Context, binlogType storage.BinlogType, collectionID int64) (map[typeutil.UniqueID][]*datapb.FieldBinlog, error) { ret := make(map[typeutil.UniqueID][]*datapb.FieldBinlog) var err error var logPathPrefix string switch binlogType { case storage.InsertBinlog: - logPathPrefix = SegmentBinlogPathPrefix + logPathPrefix = fmt.Sprintf("%s/%d", SegmentBinlogPathPrefix, collectionID) case storage.DeleteBinlog: - logPathPrefix = SegmentDeltalogPathPrefix + logPathPrefix = fmt.Sprintf("%s/%d", SegmentDeltalogPathPrefix, collectionID) case storage.StatsBinlog: - logPathPrefix = SegmentStatslogPathPrefix + logPathPrefix = fmt.Sprintf("%s/%d", SegmentStatslogPathPrefix, collectionID) case storage.BM25Binlog: - logPathPrefix = SegmentBM25logPathPrefix + logPathPrefix = fmt.Sprintf("%s/%d", SegmentBM25logPathPrefix, collectionID) default: err = fmt.Errorf("invalid binlog type: %d", binlogType) } @@ -190,13 +179,6 @@ func (kc *Catalog) listBinlogs(ctx context.Context, binlogType storage.BinlogTyp return nil, err } - var prefixIdx int - if len(kc.metaRootpath) == 0 { - prefixIdx = len(logPathPrefix) + 1 - } else { - prefixIdx = len(kc.metaRootpath) + 1 + len(logPathPrefix) + 1 - } - applyFn := func(key []byte, value []byte) error { fieldBinlog := &datapb.FieldBinlog{} err := proto.Unmarshal(value, fieldBinlog) @@ -204,7 +186,7 @@ func (kc 
*Catalog) listBinlogs(ctx context.Context, binlogType storage.BinlogTyp return fmt.Errorf("failed to unmarshal datapb.FieldBinlog: %d, err:%w", fieldBinlog.FieldID, err) } - _, _, segmentID, err := kc.parseBinlogKey(string(key), prefixIdx) + segmentID, err := kc.parseBinlogKey(string(key)) if err != nil { return fmt.Errorf("prefix:%s, %w", path.Join(kc.metaRootpath, logPathPrefix), err) } diff --git a/internal/metastore/kv/datacoord/kv_catalog_test.go b/internal/metastore/kv/datacoord/kv_catalog_test.go index 992aeb0830aed..4e845ef197ebb 100644 --- a/internal/metastore/kv/datacoord/kv_catalog_test.go +++ b/internal/metastore/kv/datacoord/kv_catalog_test.go @@ -44,7 +44,6 @@ import ( "github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/pkg/kv/predicates" "github.com/milvus-io/milvus/pkg/util/etcd" - "github.com/milvus-io/milvus/pkg/util/metautil" "github.com/milvus-io/milvus/pkg/util/paramtable" ) @@ -151,7 +150,7 @@ func Test_ListSegments(t *testing.T) { metakv.EXPECT().WalkWithPrefix(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("error")) catalog := NewCatalog(metakv, rootPath, "") - ret, err := catalog.ListSegments(context.TODO()) + ret, err := catalog.ListSegments(context.TODO(), collectionID) assert.Nil(t, ret) assert.Error(t, err) }) @@ -198,7 +197,7 @@ func Test_ListSegments(t *testing.T) { }) catalog := NewCatalog(metakv, rootPath, "") - ret, err := catalog.ListSegments(context.TODO()) + ret, err := catalog.ListSegments(context.TODO(), collectionID) assert.NotNil(t, ret) assert.NoError(t, err) @@ -215,7 +214,7 @@ func Test_ListSegments(t *testing.T) { }) catalog := NewCatalog(metakv, rootPath, "") - ret, err := catalog.ListSegments(context.TODO()) + ret, err := catalog.ListSegments(context.TODO(), collectionID) assert.NotNil(t, ret) assert.NoError(t, err) assert.Zero(t, len(ret)) @@ -256,7 +255,7 @@ func Test_ListSegments(t *testing.T) { return errors.New("should not reach here") }) - ret, err := catalog.ListSegments(context.TODO()) + ret, err := catalog.ListSegments(context.TODO(), collectionID) assert.NotNil(t, ret) assert.NoError(t, err) @@ -745,44 +744,16 @@ func Test_ChannelExists_SaveError(t *testing.T) { func Test_parseBinlogKey(t *testing.T) { catalog := NewCatalog(nil, "", "") - t.Run("parse collection id fail", func(t *testing.T) { - ret1, ret2, ret3, err := catalog.parseBinlogKey("root/err/1/1/1", 5) - assert.Error(t, err) - assert.Equal(t, int64(0), ret1) - assert.Equal(t, int64(0), ret2) - assert.Equal(t, int64(0), ret3) - }) - - t.Run("parse partition id fail", func(t *testing.T) { - ret1, ret2, ret3, err := catalog.parseBinlogKey("root/1/err/1/1", 5) - assert.Error(t, err) - assert.Equal(t, int64(0), ret1) - assert.Equal(t, int64(0), ret2) - assert.Equal(t, int64(0), ret3) - }) - t.Run("parse segment id fail", func(t *testing.T) { - ret1, ret2, ret3, err := catalog.parseBinlogKey("root/1/1/err/1", 5) - assert.Error(t, err) - assert.Equal(t, int64(0), ret1) - assert.Equal(t, int64(0), ret2) - assert.Equal(t, int64(0), ret3) - }) - - t.Run("miss field", func(t *testing.T) { - ret1, ret2, ret3, err := catalog.parseBinlogKey("root/1/1/", 5) + segmentID, err := catalog.parseBinlogKey("root/1/1/err/1") assert.Error(t, err) - assert.Equal(t, int64(0), ret1) - assert.Equal(t, int64(0), ret2) - assert.Equal(t, int64(0), ret3) + assert.Equal(t, int64(0), segmentID) }) t.Run("test ok", func(t *testing.T) { - ret1, ret2, ret3, err := catalog.parseBinlogKey("root/1/1/1/1", 5) + segmentID, err := 
catalog.parseBinlogKey("root/1/1/1/1") assert.NoError(t, err) - assert.Equal(t, int64(1), ret1) - assert.Equal(t, int64(1), ret2) - assert.Equal(t, int64(1), ret3) + assert.Equal(t, int64(1), segmentID) }) } @@ -1193,7 +1164,7 @@ func BenchmarkCatalog_List1000Segments(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - segments, err := catalog.ListSegments(ctx) + segments, err := catalog.ListSegments(ctx, collectionID) assert.NoError(b, err) for _, s := range segments { assert.NotNil(b, s) @@ -1206,13 +1177,8 @@ func BenchmarkCatalog_List1000Segments(b *testing.B) { func generateSegments(ctx context.Context, catalog *Catalog, n int, rootPath string) { rand.Seed(time.Now().UnixNano()) - var collectionID int64 for i := 0; i < n; i++ { - if collectionID%25 == 0 { - collectionID = rand.Int63() - } - v := rand.Int63() segment := addSegment(rootPath, collectionID, v, v, v) err := catalog.AddSegment(ctx, segment) @@ -1229,7 +1195,7 @@ func addSegment(rootPath string, collectionID, partitionID, segmentID, fieldID i Binlogs: []*datapb.Binlog{ { EntriesNum: 10000, - LogPath: metautil.BuildInsertLogPath(rootPath, collectionID, partitionID, segmentID, fieldID, int64(rand.Int())), + LogID: int64(rand.Int()), }, }, }, @@ -1241,58 +1207,7 @@ func addSegment(rootPath string, collectionID, partitionID, segmentID, fieldID i Binlogs: []*datapb.Binlog{ { EntriesNum: 5, - LogPath: metautil.BuildDeltaLogPath(rootPath, collectionID, partitionID, segmentID, int64(rand.Int())), - }, - }, - }, - } - - statslogs = []*datapb.FieldBinlog{ - { - FieldID: 1, - Binlogs: []*datapb.Binlog{ - { - EntriesNum: 5, - LogPath: metautil.BuildStatsLogPath(rootPath, collectionID, partitionID, segmentID, fieldID, int64(rand.Int())), - }, - }, - }, - } - - return &datapb.SegmentInfo{ - ID: segmentID, - CollectionID: collectionID, - PartitionID: partitionID, - NumOfRows: 10000, - State: commonpb.SegmentState_Flushed, - Binlogs: binlogs, - Deltalogs: deltalogs, - Statslogs: statslogs, - } -} - -func getSegment(rootPath string, collectionID, partitionID, segmentID, fieldID int64, binlogNum int) *datapb.SegmentInfo { - binLogPaths := make([]*datapb.Binlog, binlogNum) - for i := 0; i < binlogNum; i++ { - binLogPaths[i] = &datapb.Binlog{ - EntriesNum: 10000, - LogPath: metautil.BuildInsertLogPath(rootPath, collectionID, partitionID, segmentID, fieldID, int64(i)), - } - } - binlogs = []*datapb.FieldBinlog{ - { - FieldID: fieldID, - Binlogs: binLogPaths, - }, - } - - deltalogs = []*datapb.FieldBinlog{ - { - FieldID: fieldID, - Binlogs: []*datapb.Binlog{ - { - EntriesNum: 5, - LogPath: metautil.BuildDeltaLogPath(rootPath, collectionID, partitionID, segmentID, int64(rand.Int())), + LogID: int64(rand.Int()), }, }, }, @@ -1304,7 +1219,7 @@ func getSegment(rootPath string, collectionID, partitionID, segmentID, fieldID i Binlogs: []*datapb.Binlog{ { EntriesNum: 5, - LogPath: metautil.BuildStatsLogPath(rootPath, collectionID, partitionID, segmentID, fieldID, int64(rand.Int())), + LogID: int64(rand.Int()), }, }, }, diff --git a/internal/metastore/kv/querycoord/kv_catalog.go b/internal/metastore/kv/querycoord/kv_catalog.go index 3531d8e1e43dc..f70a92963e3c5 100644 --- a/internal/metastore/kv/querycoord/kv_catalog.go +++ b/internal/metastore/kv/querycoord/kv_catalog.go @@ -17,6 +17,7 @@ import ( "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/pkg/kv" "github.com/milvus-io/milvus/pkg/util/compressor" + "github.com/milvus-io/milvus/pkg/util/conc" "github.com/milvus-io/milvus/pkg/util/paramtable" ) @@ -37,12 
+38,16 @@ const ( type Catalog struct { cli kv.MetaKv paginationSize int + + pool *conc.Pool[any] } func NewCatalog(cli kv.MetaKv) Catalog { + ioPool := conc.NewPool[any](paramtable.Get().MetaStoreCfg.ReadConcurrency.GetAsInt()) return Catalog{ - cli: cli, + cli: cli, paginationSize: paramtable.Get().MetaStoreCfg.PaginationSize.GetAsInt(), + pool: ioPool, } } @@ -126,23 +131,40 @@ func (s Catalog) GetCollections(ctx context.Context) ([]*querypb.CollectionLoadI return ret, nil } -func (s Catalog) GetPartitions(ctx context.Context) (map[int64][]*querypb.PartitionLoadInfo, error) { - ret := make(map[int64][]*querypb.PartitionLoadInfo) - applyFn := func(key []byte, value []byte) error { - info := querypb.PartitionLoadInfo{} - if err := proto.Unmarshal(value, &info); err != nil { - return err - } - ret[info.GetCollectionID()] = append(ret[info.GetCollectionID()], &info) - return nil +func (s Catalog) GetPartitions(ctx context.Context, collectionIDs []int64) (map[int64][]*querypb.PartitionLoadInfo, error) { + collectionPartitions := make([][]*querypb.PartitionLoadInfo, len(collectionIDs)) + futures := make([]*conc.Future[any], 0, len(collectionIDs)) + for i, collectionID := range collectionIDs { + i := i + collectionID := collectionID + futures = append(futures, s.pool.Submit(func() (any, error) { + prefix := EncodePartitionLoadInfoPrefix(collectionID) + _, values, err := s.cli.LoadWithPrefix(ctx, prefix) + if err != nil { + return nil, err + } + ret := make([]*querypb.PartitionLoadInfo, 0, len(values)) + for _, v := range values { + info := querypb.PartitionLoadInfo{} + if err = proto.Unmarshal([]byte(v), &info); err != nil { + return nil, err + } + ret = append(ret, &info) + } + collectionPartitions[i] = ret + return nil, nil + })) } - - err := s.cli.WalkWithPrefix(ctx, PartitionLoadInfoPrefix, s.paginationSize, applyFn) + err := conc.AwaitAll(futures...) 
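// The fan-out just assembled, condensed into one helper for reference: work is submitted
// to a conc.Pool sized by MetaStoreCfg.ReadConcurrency, each task writes only its own
// pre-allocated slot so no locking is needed, and conc.AwaitAll waits for every future
// and surfaces an error if any task failed. loadOne is an illustrative stand-in for the
// per-collection LoadWithPrefix plus proto.Unmarshal body above; only the conc API
// already imported in this file is used.
func fanOut(pool *conc.Pool[any], ids []int64, loadOne func(int64) error) error {
	futures := make([]*conc.Future[any], 0, len(ids))
	for _, id := range ids {
		id := id // rebind so each closure sees its own id (pre-Go 1.22 loop semantics)
		futures = append(futures, pool.Submit(func() (any, error) {
			return nil, loadOne(id)
		}))
	}
	return conc.AwaitAll(futures...)
}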
if err != nil { return nil, err } - return ret, nil + result := make(map[int64][]*querypb.PartitionLoadInfo, len(collectionIDs)) + for i, partitions := range collectionPartitions { + result[collectionIDs[i]] = partitions + } + return result, nil } func (s Catalog) GetReplicas(ctx context.Context) ([]*querypb.Replica, error) { @@ -335,6 +357,10 @@ func EncodePartitionLoadInfoKey(collection, partition int64) string { return fmt.Sprintf("%s/%d/%d", PartitionLoadInfoPrefix, collection, partition) } +func EncodePartitionLoadInfoPrefix(collection int64) string { + return fmt.Sprintf("%s/%d/", PartitionLoadInfoPrefix, collection) +} + func encodeReplicaKey(collection, replica int64) string { return fmt.Sprintf("%s/%d/%d", ReplicaPrefix, collection, replica) } diff --git a/internal/metastore/kv/querycoord/kv_catalog_test.go b/internal/metastore/kv/querycoord/kv_catalog_test.go index 1c119e838f611..b145e3dbad87d 100644 --- a/internal/metastore/kv/querycoord/kv_catalog_test.go +++ b/internal/metastore/kv/querycoord/kv_catalog_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/cockroachdb/errors" + "github.com/samber/lo" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" @@ -99,7 +100,9 @@ func (suite *CatalogTestSuite) TestCollectionWithPartition() { suite.NoError(err) suite.Len(collections, 1) suite.Equal(int64(3), collections[0].GetCollectionID()) - partitions, err := suite.catalog.GetPartitions(ctx) + partitions, err := suite.catalog.GetPartitions(ctx, lo.Map(collections, func(collection *querypb.CollectionLoadInfo, _ int) int64 { + return collection.GetCollectionID() + })) suite.NoError(err) suite.Len(partitions, 1) suite.Len(partitions[int64(3)], 1) @@ -123,11 +126,56 @@ func (suite *CatalogTestSuite) TestPartition() { suite.catalog.ReleasePartition(ctx, 1) suite.catalog.ReleasePartition(ctx, 2) - partitions, err := suite.catalog.GetPartitions(ctx) + partitions, err := suite.catalog.GetPartitions(ctx, []int64{0}) suite.NoError(err) suite.Len(partitions, 1) } +func (suite *CatalogTestSuite) TestGetPartitions() { + ctx := context.Background() + suite.catalog.SaveCollection(ctx, &querypb.CollectionLoadInfo{ + CollectionID: 1, + }) + suite.catalog.SavePartition(ctx, &querypb.PartitionLoadInfo{ + CollectionID: 1, + PartitionID: 100, + }) + suite.catalog.SaveCollection(ctx, &querypb.CollectionLoadInfo{ + CollectionID: 2, + }) + suite.catalog.SavePartition(ctx, &querypb.PartitionLoadInfo{ + CollectionID: 2, + PartitionID: 200, + }) + suite.catalog.SaveCollection(ctx, &querypb.CollectionLoadInfo{ + CollectionID: 3, + }) + suite.catalog.SavePartition(ctx, &querypb.PartitionLoadInfo{ + CollectionID: 3, + PartitionID: 300, + }) + + partitions, err := suite.catalog.GetPartitions(ctx, []int64{1, 2, 3}) + suite.NoError(err) + suite.Len(partitions, 3) + suite.Len(partitions[int64(1)], 1) + suite.Len(partitions[int64(2)], 1) + suite.Len(partitions[int64(3)], 1) + partitions, err = suite.catalog.GetPartitions(ctx, []int64{2, 3}) + suite.NoError(err) + suite.Len(partitions, 2) + suite.Len(partitions[int64(2)], 1) + suite.Len(partitions[int64(3)], 1) + partitions, err = suite.catalog.GetPartitions(ctx, []int64{3}) + suite.NoError(err) + suite.Len(partitions, 1) + suite.Len(partitions[int64(3)], 1) + suite.Equal(int64(300), partitions[int64(3)][0].GetPartitionID()) + partitions, err = suite.catalog.GetPartitions(ctx, []int64{}) + suite.NoError(err) + suite.Len(partitions, 0) +} + func (suite *CatalogTestSuite) TestReleaseManyPartitions() { ctx := context.Background() partitionIDs := 
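// Key-layout sketch for the per-collection scan introduced above; the concrete value of
// PartitionLoadInfoPrefix does not matter here, only the shape. The trailing '/' in the
// prefix is what keeps a scan for collection 1 from also matching collections 10, 11, ...
func examplePartitionLoadInfoKeys() (prefix, key string) {
	prefix = EncodePartitionLoadInfoPrefix(1) // "<PartitionLoadInfoPrefix>/1/"
	key = EncodePartitionLoadInfoKey(1, 100)  // "<PartitionLoadInfoPrefix>/1/100", i.e. prefix + "100"
	return prefix, key
}
// Because GetPartitions now returns one map entry per requested collection even when no
// PartitionLoadInfo keys remain for it, TestReleaseManyPartitions below asserts an entry
// for collection 1 holding an empty slice rather than an empty map.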
make([]int64, 0) @@ -141,9 +189,10 @@ func (suite *CatalogTestSuite) TestReleaseManyPartitions() { err := suite.catalog.ReleasePartition(ctx, 1, partitionIDs...) suite.NoError(err) - partitions, err := suite.catalog.GetPartitions(ctx) + partitions, err := suite.catalog.GetPartitions(ctx, []int64{1}) suite.NoError(err) - suite.Len(partitions, 0) + suite.Len(partitions, 1) + suite.Len(partitions[int64(1)], 0) } func (suite *CatalogTestSuite) TestReplica() { diff --git a/internal/metastore/kv/rootcoord/kv_catalog.go b/internal/metastore/kv/rootcoord/kv_catalog.go index aeae788f00456..1679a1ce627fa 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog.go +++ b/internal/metastore/kv/rootcoord/kv_catalog.go @@ -23,10 +23,12 @@ import ( "github.com/milvus-io/milvus/pkg/kv" "github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/util" + "github.com/milvus-io/milvus/pkg/util/conc" "github.com/milvus-io/milvus/pkg/util/crypto" "github.com/milvus-io/milvus/pkg/util/etcd" "github.com/milvus-io/milvus/pkg/util/funcutil" "github.com/milvus-io/milvus/pkg/util/merr" + "github.com/milvus-io/milvus/pkg/util/paramtable" "github.com/milvus-io/milvus/pkg/util/typeutil" ) @@ -38,6 +40,13 @@ import ( type Catalog struct { Txn kv.TxnKV Snapshot kv.SnapShotKV + + pool *conc.Pool[any] +} + +func NewCatalog(metaKV kv.TxnKV, ss kv.SnapShotKV) metastore.RootCoordCatalog { + ioPool := conc.NewPool[any](paramtable.Get().MetaStoreCfg.ReadConcurrency.GetAsInt()) + return &Catalog{Txn: metaKV, Snapshot: ss, pool: ioPool} } func BuildCollectionKey(dbID typeutil.UniqueID, collectionID typeutil.UniqueID) string { @@ -498,7 +507,6 @@ func (kc *Catalog) appendPartitionAndFieldsInfo(ctx context.Context, collMeta *p return collection, nil } -// TODO: This function will be invoked many times if there are many databases, leading to significant overhead. func (kc *Catalog) batchAppendPartitionAndFieldsInfo(ctx context.Context, collMeta []*pb.CollectionInfo, ts typeutil.Timestamp, ) ([]*model.Collection, error) { @@ -798,27 +806,33 @@ func (kc *Catalog) ListCollections(ctx context.Context, dbID int64, ts typeutil. } start := time.Now() - colls := make([]*pb.CollectionInfo, 0, len(vals)) - for _, val := range vals { - collMeta := &pb.CollectionInfo{} - err := proto.Unmarshal([]byte(val), collMeta) - if err != nil { - log.Warn("unmarshal collection info failed", zap.Error(err)) - continue - } - kc.fixDefaultDBIDConsistency(ctx, collMeta, ts) - colls = append(colls, collMeta) + colls := make([]*model.Collection, len(vals)) + futures := make([]*conc.Future[any], 0, len(vals)) + for i, val := range vals { + i := i + val := val + futures = append(futures, kc.pool.Submit(func() (any, error) { + collMeta := &pb.CollectionInfo{} + err := proto.Unmarshal([]byte(val), collMeta) + if err != nil { + log.Warn("unmarshal collection info failed", zap.Error(err)) + return nil, err + } + kc.fixDefaultDBIDConsistency(ctx, collMeta, ts) + collection, err := kc.appendPartitionAndFieldsInfo(ctx, collMeta, ts) + if err != nil { + return nil, err + } + colls[i] = collection + return nil, nil + })) } - log.Info("unmarshal all collection details cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) - - start = time.Now() - ret, err := kc.batchAppendPartitionAndFieldsInfo(ctx, colls, ts) - log.Info("append partition and fields info cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) + err = conc.AwaitAll(futures...) 
if err != nil { return nil, err } - - return ret, nil + log.Info("unmarshal all collection details cost", zap.Int64("db", dbID), zap.Duration("cost", time.Since(start))) + return colls, nil } // fixDefaultDBIDConsistency fix dbID consistency for collectionInfo. @@ -826,12 +840,12 @@ func (kc *Catalog) ListCollections(ctx context.Context, dbID int64, ts typeutil. // all collections in default database should be marked with dbID 1. // this method also update dbid in meta store when dbid is 0 // see also: https://github.com/milvus-io/milvus/issues/33608 -func (kv *Catalog) fixDefaultDBIDConsistency(ctx context.Context, collMeta *pb.CollectionInfo, ts typeutil.Timestamp) { +func (kc *Catalog) fixDefaultDBIDConsistency(ctx context.Context, collMeta *pb.CollectionInfo, ts typeutil.Timestamp) { if collMeta.DbId == util.NonDBID { coll := model.UnmarshalCollectionModel(collMeta) cloned := coll.Clone() cloned.DBID = util.DefaultDBID - kv.alterModifyCollection(ctx, coll, cloned, ts) + kc.alterModifyCollection(ctx, coll, cloned, ts) collMeta.DbId = util.DefaultDBID } diff --git a/internal/metastore/kv/rootcoord/kv_catalog_test.go b/internal/metastore/kv/rootcoord/kv_catalog_test.go index 9cf8dfcfcfd38..b1a3dacd6823d 100644 --- a/internal/metastore/kv/rootcoord/kv_catalog_test.go +++ b/internal/metastore/kv/rootcoord/kv_catalog_test.go @@ -105,7 +105,7 @@ func TestCatalog_ListCollections(t *testing.T) { kv.On("LoadWithPrefix", mock.Anything, CollectionMetaPrefix, ts). Return(nil, nil, targetErr) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, util.NonDBID, ts) assert.ErrorIs(t, err, targetErr) assert.Nil(t, ret) @@ -119,12 +119,8 @@ func TestCatalog_ListCollections(t *testing.T) { assert.NoError(t, err) kv.On("LoadWithPrefix", mock.Anything, CollectionMetaPrefix, ts). Return([]string{"key"}, []string{string(bColl)}, nil) - kv.On("LoadWithPrefix", mock.Anything, mock.MatchedBy( - func(prefix string) bool { - return strings.HasPrefix(prefix, PartitionMetaPrefix) - }), ts). - Return(nil, nil, targetErr) - kc := Catalog{Snapshot: kv} + kv.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, targetErr) + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, util.NonDBID, ts) assert.ErrorIs(t, err, targetErr) @@ -155,7 +151,7 @@ func TestCatalog_ListCollections(t *testing.T) { return strings.HasPrefix(prefix, FieldMetaPrefix) }), ts). Return(nil, nil, targetErr) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, util.NonDBID, ts) assert.ErrorIs(t, err, targetErr) @@ -171,7 +167,7 @@ func TestCatalog_ListCollections(t *testing.T) { kv.On("LoadWithPrefix", mock.Anything, CollectionMetaPrefix, ts). Return([]string{"key"}, []string{string(bColl)}, nil) kv.On("MultiSaveAndRemove", mock.Anything, mock.Anything, mock.Anything, ts).Return(nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, util.NonDBID, ts) assert.NoError(t, err) @@ -218,7 +214,7 @@ func TestCatalog_ListCollections(t *testing.T) { }), ts). 
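// Construction pattern used throughout the rewritten tests in this file: NewCatalog
// returns the metastore.RootCoordCatalog interface with its internal conc.Pool
// initialised, so tests that exercise unexported helpers such as loadCollection or
// listPartitionsAfter210 assert back to the concrete *Catalog. Passing nil for the
// unused TxnKV or SnapShotKV half is fine because each test touches only one backend.
// A minimal sketch, assuming the pkg/kv interfaces are imported as kv; the parameter
// names are illustrative mocks built as elsewhere in these tests.
func newTestCatalogs(snapshotMock kv.SnapShotKV, txnMock kv.TxnKV) (*Catalog, metastore.RootCoordCatalog) {
	kc := NewCatalog(nil, snapshotMock).(*Catalog) // snapshot-backed collection/partition paths
	c := NewCatalog(txnMock, nil)                  // TxnKV-backed credential/RBAC paths
	return kc, c
}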
Return([]string{"rootcoord/functions/1/1"}, []string{string(fcm)}, nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, testDb, ts) assert.NoError(t, err) assert.NotNil(t, ret) @@ -269,7 +265,7 @@ func TestCatalog_ListCollections(t *testing.T) { Return([]string{"rootcoord/functions/1/1"}, []string{string(fcm)}, nil) kv.On("MultiSaveAndRemove", mock.Anything, mock.Anything, mock.Anything, ts).Return(nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv) ret, err := kc.ListCollections(ctx, util.NonDBID, ts) assert.NoError(t, err) @@ -285,7 +281,7 @@ func TestCatalog_loadCollection(t *testing.T) { ctx := context.Background() kv := mocks.NewSnapShotKV(t) kv.EXPECT().Load(mock.Anything, mock.Anything, mock.Anything).Return("", errors.New("mock")) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv).(*Catalog) _, err := kc.loadCollection(ctx, testDb, 1, 0) assert.Error(t, err) }) @@ -294,7 +290,7 @@ func TestCatalog_loadCollection(t *testing.T) { ctx := context.Background() kv := mocks.NewSnapShotKV(t) kv.EXPECT().Load(mock.Anything, mock.Anything, mock.Anything).Return("not in pb format", nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv).(*Catalog) _, err := kc.loadCollection(ctx, testDb, 1, 0) assert.Error(t, err) }) @@ -306,7 +302,7 @@ func TestCatalog_loadCollection(t *testing.T) { assert.NoError(t, err) kv := mocks.NewSnapShotKV(t) kv.EXPECT().Load(mock.Anything, mock.Anything, mock.Anything).Return(string(value), nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv).(*Catalog) got, err := kc.loadCollection(ctx, util.DefaultDBID, 1, 0) assert.NoError(t, err) assert.Equal(t, got.GetID(), coll.GetID()) @@ -324,7 +320,7 @@ func TestCatalog_loadCollection(t *testing.T) { kv := mocks.NewSnapShotKV(t) kv.EXPECT().Load(mock.Anything, mock.Anything, mock.Anything).Return(string(value), nil) kv.EXPECT().MultiSaveAndRemove(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - kc := Catalog{Snapshot: kv} + kc := NewCatalog(nil, kv).(*Catalog) got, err := kc.loadCollection(ctx, util.NonDBID, 1, 0) assert.NoError(t, err) assert.Equal(t, got.GetID(), coll.GetID()) @@ -378,7 +374,7 @@ func Test_partitionExistByName(t *testing.T) { func TestCatalog_GetCollectionByID(t *testing.T) { ctx := context.TODO() ss := mocks.NewSnapShotKV(t) - c := Catalog{Snapshot: ss} + c := NewCatalog(nil, ss) ss.EXPECT().Load(mock.Anything, mock.Anything, mock.Anything).Return("", errors.New("load error")).Twice() coll, err := c.GetCollectionByID(ctx, 0, 1, 1) @@ -415,7 +411,7 @@ func TestCatalog_CreatePartitionV2(t *testing.T) { snapshot.LoadFunc = func(ctx context.Context, key string, ts typeutil.Timestamp) (string, error) { return "", errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.CreatePartition(ctx, 0, &model.Partition{}, 0) assert.Error(t, err) }) @@ -437,7 +433,7 @@ func TestCatalog_CreatePartitionV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.CreatePartition(ctx, 0, &model.Partition{}, 0) assert.Error(t, err) @@ -462,7 +458,7 @@ func TestCatalog_CreatePartitionV2(t *testing.T) { return string(value), nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.CreatePartition(ctx, 0, &model.Partition{PartitionID: partID}, 0) assert.Error(t, err) @@ -481,7 +477,7 @@ func TestCatalog_CreatePartitionV2(t *testing.T) { return 
string(value), nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.CreatePartition(ctx, 0, &model.Partition{PartitionName: partition}, 0) assert.Error(t, err) @@ -507,7 +503,7 @@ func TestCatalog_CreatePartitionV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.CreatePartition(ctx, 0, &model.Partition{}, 0) assert.Error(t, err) @@ -528,7 +524,7 @@ func TestCatalog_CreateAliasV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.CreateAlias(ctx, &model.Alias{}, 0) assert.Error(t, err) @@ -549,7 +545,7 @@ func TestCatalog_listPartitionsAfter210(t *testing.T) { return nil, nil, errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listPartitionsAfter210(ctx, 1, 0) assert.Error(t, err) @@ -563,7 +559,7 @@ func TestCatalog_listPartitionsAfter210(t *testing.T) { return []string{"key"}, []string{"not in pb format"}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listPartitionsAfter210(ctx, 1, 0) assert.Error(t, err) @@ -581,7 +577,7 @@ func TestCatalog_listPartitionsAfter210(t *testing.T) { return []string{"key"}, []string{string(value)}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) got, err := kc.listPartitionsAfter210(ctx, 1, 0) assert.NoError(t, err) @@ -607,7 +603,7 @@ func TestCatalog_listFieldsAfter210(t *testing.T) { return nil, nil, errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listFieldsAfter210(ctx, 1, 0) assert.Error(t, err) @@ -621,7 +617,7 @@ func TestCatalog_listFieldsAfter210(t *testing.T) { return []string{"key"}, []string{"not in pb format"}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listFieldsAfter210(ctx, 1, 0) assert.Error(t, err) @@ -639,7 +635,7 @@ func TestCatalog_listFieldsAfter210(t *testing.T) { return []string{"key"}, []string{string(value)}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) got, err := kc.listFieldsAfter210(ctx, 1, 0) assert.NoError(t, err) @@ -656,7 +652,7 @@ func TestCatalog_AlterAliasV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.AlterAlias(ctx, &model.Alias{}, 0) assert.Error(t, err) @@ -705,7 +701,7 @@ func TestCatalog_DropPartitionV2(t *testing.T) { snapshot.On("Load", mock.Anything, mock.Anything, mock.Anything).Return("not in codec format", nil) - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.DropPartition(ctx, 0, 100, 101, 0) assert.Error(t, err) @@ -718,7 +714,7 @@ func TestCatalog_DropPartitionV2(t *testing.T) { snapshot.On("Load", mock.Anything, mock.Anything, mock.Anything).Return("", merr.WrapErrIoKeyNotFound("partition")) - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.DropPartition(ctx, 0, 100, 101, 0) assert.NoError(t, err) @@ -739,7 +735,7 @@ func TestCatalog_DropPartitionV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.DropPartition(ctx, 0, 100, 101, 0) assert.Error(t, err) @@ -771,7 +767,7 @@ func TestCatalog_DropPartitionV2(t *testing.T) { return 
errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err = kc.DropPartition(ctx, 0, 100, 101, 0) assert.Error(t, err) @@ -792,7 +788,7 @@ func TestCatalog_DropAliasV2(t *testing.T) { return errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) err := kc.DropAlias(ctx, testDb, "alias", 0) assert.Error(t, err) @@ -813,7 +809,7 @@ func TestCatalog_listAliasesBefore210(t *testing.T) { return nil, nil, errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listAliasesBefore210(ctx, 0) assert.Error(t, err) @@ -827,7 +823,7 @@ func TestCatalog_listAliasesBefore210(t *testing.T) { return []string{"key"}, []string{"not in pb format"}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listAliasesBefore210(ctx, 0) assert.Error(t, err) @@ -845,7 +841,7 @@ func TestCatalog_listAliasesBefore210(t *testing.T) { return []string{"key"}, []string{string(value)}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) got, err := kc.listAliasesBefore210(ctx, 0) assert.NoError(t, err) @@ -863,7 +859,7 @@ func TestCatalog_listAliasesAfter210(t *testing.T) { return nil, nil, errors.New("mock") } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listAliasesAfter210WithDb(ctx, testDb, 0) assert.Error(t, err) @@ -877,7 +873,7 @@ func TestCatalog_listAliasesAfter210(t *testing.T) { return []string{"key"}, []string{"not in pb format"}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.listAliasesAfter210WithDb(ctx, testDb, 0) assert.Error(t, err) @@ -895,7 +891,7 @@ func TestCatalog_listAliasesAfter210(t *testing.T) { return []string{"key"}, []string{string(value)}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) got, err := kc.listAliasesAfter210WithDb(ctx, testDb, 0) assert.NoError(t, err) @@ -913,7 +909,7 @@ func TestCatalog_ListAliasesV2(t *testing.T) { return []string{"key"}, []string{"not in pb format"}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err := kc.ListAliases(ctx, testDb, 0) assert.Error(t, err) @@ -938,7 +934,7 @@ func TestCatalog_ListAliasesV2(t *testing.T) { return []string{"key"}, []string{string(value)}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) _, err = kc.ListAliases(ctx, util.NonDBID, 0) assert.Error(t, err) @@ -963,7 +959,7 @@ func TestCatalog_ListAliasesV2(t *testing.T) { return []string{}, []string{}, nil } - kc := Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) got, err := kc.ListAliases(ctx, testDb, 0) assert.NoError(t, err) @@ -1020,14 +1016,14 @@ func Test_batchMultiSaveAndRemove(t *testing.T) { func TestCatalog_AlterCollection(t *testing.T) { t.Run("add", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() err := kc.AlterCollection(ctx, nil, nil, metastore.ADD, 0) assert.Error(t, err) }) t.Run("delete", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() err := kc.AlterCollection(ctx, nil, nil, metastore.DELETE, 0) assert.Error(t, err) @@ -1040,7 +1036,7 @@ func TestCatalog_AlterCollection(t *testing.T) { kvs[key] = value return nil } - kc := &Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) ctx 
:= context.Background() var collectionID int64 = 1 oldC := &model.Collection{CollectionID: collectionID, State: pb.CollectionState_CollectionCreating} @@ -1058,7 +1054,7 @@ func TestCatalog_AlterCollection(t *testing.T) { }) t.Run("modify, tenant id changed", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() var collectionID int64 = 1 oldC := &model.Collection{TenantID: "1", CollectionID: collectionID, State: pb.CollectionState_CollectionCreating} @@ -1077,7 +1073,7 @@ func TestCatalog_AlterCollection(t *testing.T) { return nil } - kc := &Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) ctx := context.Background() oldC := &model.Collection{DBID: 0, CollectionID: collectionID, State: pb.CollectionState_CollectionCreated} newC := &model.Collection{DBID: 1, CollectionID: collectionID, State: pb.CollectionState_CollectionCreated} @@ -1088,14 +1084,14 @@ func TestCatalog_AlterCollection(t *testing.T) { func TestCatalog_AlterPartition(t *testing.T) { t.Run("add", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() err := kc.AlterPartition(ctx, testDb, nil, nil, metastore.ADD, 0) assert.Error(t, err) }) t.Run("delete", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() err := kc.AlterPartition(ctx, testDb, nil, nil, metastore.DELETE, 0) assert.Error(t, err) @@ -1108,7 +1104,7 @@ func TestCatalog_AlterPartition(t *testing.T) { kvs[key] = value return nil } - kc := &Catalog{Snapshot: snapshot} + kc := NewCatalog(nil, snapshot).(*Catalog) ctx := context.Background() var collectionID int64 = 1 var partitionID int64 = 2 @@ -1127,7 +1123,7 @@ func TestCatalog_AlterPartition(t *testing.T) { }) t.Run("modify, tenant id changed", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() var collectionID int64 = 1 oldP := &model.Partition{PartitionID: 1, CollectionID: collectionID, State: pb.PartitionState_PartitionCreating} @@ -1196,7 +1192,7 @@ func withMockMultiSaveAndRemove(err error) mockSnapshotOpt { func TestCatalog_CreateCollection(t *testing.T) { t.Run("collection not creating", func(t *testing.T) { - kc := &Catalog{} + kc := NewCatalog(nil, nil) ctx := context.Background() coll := &model.Collection{State: pb.CollectionState_CollectionDropping} err := kc.CreateCollection(ctx, coll, 100) @@ -1205,7 +1201,7 @@ func TestCatalog_CreateCollection(t *testing.T) { t.Run("failed to save collection", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockSave(errors.New("error mock Save"))) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{State: pb.CollectionState_CollectionCreating} err := kc.CreateCollection(ctx, coll, 100) @@ -1214,7 +1210,7 @@ func TestCatalog_CreateCollection(t *testing.T) { t.Run("succeed to save collection but failed to save other keys", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(errors.New("error mock MultiSave"))) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1228,7 +1224,7 @@ func TestCatalog_CreateCollection(t *testing.T) { t.Run("normal case", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(nil)) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) 
ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1242,7 +1238,7 @@ func TestCatalog_CreateCollection(t *testing.T) { t.Run("create collection with function", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(nil)) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1282,7 +1278,7 @@ func TestCatalog_CreateCollection(t *testing.T) { func TestCatalog_DropCollection(t *testing.T) { t.Run("failed to remove", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemove(errors.New("error mock MultiSaveAndRemove"))) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1318,7 +1314,7 @@ func TestCatalog_DropCollection(t *testing.T) { removeCollectionCalled = true return errors.New("error mock MultiSaveAndRemove") }).Once() - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1334,7 +1330,7 @@ func TestCatalog_DropCollection(t *testing.T) { t.Run("normal case", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemove(nil)) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1348,7 +1344,7 @@ func TestCatalog_DropCollection(t *testing.T) { t.Run("drop collection with function", func(t *testing.T) { mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemove(nil)) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot) ctx := context.Background() coll := &model.Collection{ Partitions: []*model.Partition{ @@ -1397,7 +1393,7 @@ func TestRBAC_Credential(t *testing.T) { t.Run("test GetCredential", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) loadFailName = "invalid" loadFailKey = fmt.Sprintf("%s/%s", CredentialPrefix, loadFailName) @@ -1448,7 +1444,7 @@ func TestRBAC_Credential(t *testing.T) { t.Run("test CreateCredential", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) invalidName = "invalid" ) @@ -1498,7 +1494,7 @@ func TestRBAC_Credential(t *testing.T) { t.Run("test DropCredential", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) validName = "user1" validUserRoleKeyPrefix = funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, util.DefaultTenant, validName) @@ -1556,7 +1552,7 @@ func TestRBAC_Credential(t *testing.T) { t.Run("test ListCredentials", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) cmu sync.RWMutex count = 0 @@ -1638,7 +1634,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test remove", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil).(*Catalog) notExistKey = "not-exist" errorKey = "error" @@ -1682,7 +1678,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test save", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil).(*Catalog) notExistKey = "not-exist" errorKey = "error" @@ -1727,7 +1723,7 @@ func 
TestRBAC_Role(t *testing.T) { t.Run("test CreateRole", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) notExistName = "not-exist" notExistPath = funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, notExistName) @@ -1770,7 +1766,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test DropRole", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) validName = "role1" errorName = "error" @@ -1823,7 +1819,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test AlterUserRole", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) user = "default-user" @@ -1886,7 +1882,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test entity!=nil", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) errorLoad = "error" errorLoadPath = funcutil.HandleTenantForEtcdKey(RolePrefix, tenant, errorLoad) @@ -1962,7 +1958,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test entity is nil", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) // Return valid keys if loadWithPrefixReturn == True @@ -2019,7 +2015,7 @@ func TestRBAC_Role(t *testing.T) { t.Run("test ListUser", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil).(*Catalog) invalidUser = "invalid-user" invalidUserKey = funcutil.HandleTenantForEtcdKey(RoleMappingPrefix, tenant, invalidUser) @@ -2161,7 +2157,7 @@ func TestRBAC_Role(t *testing.T) { var ( loadWithPrefixReturn atomic.Bool kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) // Return valid keys if loadWithPrefixReturn == True @@ -2237,7 +2233,7 @@ func TestRBAC_Grant(t *testing.T) { t.Run("test AlterGrant", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) validRoleKey := funcutil.HandleTenantForEtcdKey(GranteePrefix, tenant, fmt.Sprintf("%s/%s/%s", validRole, object, objName)) @@ -2423,7 +2419,7 @@ func TestRBAC_Grant(t *testing.T) { t.Run("test DeleteGrant", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) errorRole = "error-role" errorRolePrefix = funcutil.HandleTenantForEtcdKey(GranteePrefix, tenant, errorRole+"/") @@ -2463,7 +2459,7 @@ func TestRBAC_Grant(t *testing.T) { t.Run("test ListGrant", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) // Mock Load in kv_catalog.go:L901 @@ -2583,7 +2579,7 @@ func TestRBAC_Grant(t *testing.T) { t.Run("test ListPolicy", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) firstLoadWithPrefixReturn atomic.Bool secondLoadWithPrefixReturn atomic.Bool @@ -2689,7 +2685,7 @@ func TestRBAC_Backup(t *testing.T) { metaKV := etcdkv.NewEtcdKV(etcdCli, rootPath) defer metaKV.RemoveWithPrefix(context.TODO(), "") defer metaKV.Close() - c := &Catalog{Txn: metaKV} + c := NewCatalog(metaKV, nil) ctx := context.Background() c.CreateRole(ctx, util.DefaultTenant, &milvuspb.RoleEntity{Name: "role1"}) @@ -2742,7 +2738,7 @@ func TestRBAC_Restore(t *testing.T) { metaKV := etcdkv.NewEtcdKV(etcdCli, rootPath) defer metaKV.RemoveWithPrefix(context.TODO(), "") defer metaKV.Close() - c := &Catalog{Txn: metaKV} + c := NewCatalog(metaKV, nil) ctx := 
context.Background() @@ -2906,7 +2902,7 @@ func TestRBAC_PrivilegeGroup(t *testing.T) { t.Run("test GetPrivilegeGroup", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) kvmock.EXPECT().Load(mock.Anything, key1).Return(string(v1), nil) kvmock.EXPECT().Load(mock.Anything, key2).Return("", merr.ErrIoKeyNotFound) @@ -2936,7 +2932,7 @@ func TestRBAC_PrivilegeGroup(t *testing.T) { t.Run("test DropPrivilegeGroup", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) kvmock.EXPECT().Remove(mock.Anything, key1).Return(nil) @@ -2966,7 +2962,7 @@ func TestRBAC_PrivilegeGroup(t *testing.T) { t.Run("test SavePrivilegeGroup", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) kvmock.EXPECT().Save(mock.Anything, key1, mock.Anything).Return(nil) @@ -2996,7 +2992,7 @@ func TestRBAC_PrivilegeGroup(t *testing.T) { t.Run("test ListPrivilegeGroups", func(t *testing.T) { var ( kvmock = mocks.NewTxnKV(t) - c = &Catalog{Txn: kvmock} + c = NewCatalog(kvmock, nil) ) kvmock.EXPECT().LoadWithPrefix(mock.Anything, PrivilegeGroupPrefix).Return( @@ -3026,7 +3022,7 @@ func getPrivilegeNames(privileges []*milvuspb.PrivilegeEntity) []string { func TestCatalog_AlterDatabase(t *testing.T) { kvmock := mocks.NewSnapShotKV(t) - c := &Catalog{Snapshot: kvmock} + c := NewCatalog(nil, kvmock) db := model.NewDatabase(1, "db", pb.DatabaseState_DatabaseCreated, nil) kvmock.EXPECT().Save(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -3053,7 +3049,7 @@ func TestCatalog_AlterDatabase(t *testing.T) { func TestCatalog_listFunctionError(t *testing.T) { mockSnapshot := newMockSnapshot(t) - kc := &Catalog{Snapshot: mockSnapshot} + kc := NewCatalog(nil, mockSnapshot).(*Catalog) mockSnapshot.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("mock error")) _, err := kc.listFunctions(context.TODO(), 1, 1) assert.Error(t, err) diff --git a/internal/metastore/mocks/mock_datacoord_catalog.go b/internal/metastore/mocks/mock_datacoord_catalog.go index d10d832b9b1c8..4a51940bb6846 100644 --- a/internal/metastore/mocks/mock_datacoord_catalog.go +++ b/internal/metastore/mocks/mock_datacoord_catalog.go @@ -1620,9 +1620,9 @@ func (_c *DataCoordCatalog_ListSegmentIndexes_Call) RunAndReturn(run func(contex return _c } -// ListSegments provides a mock function with given fields: ctx -func (_m *DataCoordCatalog) ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error) { - ret := _m.Called(ctx) +// ListSegments provides a mock function with given fields: ctx, collectionID +func (_m *DataCoordCatalog) ListSegments(ctx context.Context, collectionID int64) ([]*datapb.SegmentInfo, error) { + ret := _m.Called(ctx, collectionID) if len(ret) == 0 { panic("no return value specified for ListSegments") @@ -1630,19 +1630,19 @@ func (_m *DataCoordCatalog) ListSegments(ctx context.Context) ([]*datapb.Segment var r0 []*datapb.SegmentInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]*datapb.SegmentInfo, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, int64) ([]*datapb.SegmentInfo, error)); ok { + return rf(ctx, collectionID) } - if rf, ok := ret.Get(0).(func(context.Context) []*datapb.SegmentInfo); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, int64) []*datapb.SegmentInfo); ok { + r0 = rf(ctx, collectionID) } else { if ret.Get(0) != 
nil { r0 = ret.Get(0).([]*datapb.SegmentInfo) } } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, collectionID) } else { r1 = ret.Error(1) } @@ -1657,13 +1657,14 @@ type DataCoordCatalog_ListSegments_Call struct { // ListSegments is a helper method to define mock.On call // - ctx context.Context -func (_e *DataCoordCatalog_Expecter) ListSegments(ctx interface{}) *DataCoordCatalog_ListSegments_Call { - return &DataCoordCatalog_ListSegments_Call{Call: _e.mock.On("ListSegments", ctx)} +// - collectionID int64 +func (_e *DataCoordCatalog_Expecter) ListSegments(ctx interface{}, collectionID interface{}) *DataCoordCatalog_ListSegments_Call { + return &DataCoordCatalog_ListSegments_Call{Call: _e.mock.On("ListSegments", ctx, collectionID)} } -func (_c *DataCoordCatalog_ListSegments_Call) Run(run func(ctx context.Context)) *DataCoordCatalog_ListSegments_Call { +func (_c *DataCoordCatalog_ListSegments_Call) Run(run func(ctx context.Context, collectionID int64)) *DataCoordCatalog_ListSegments_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), args[1].(int64)) }) return _c } @@ -1673,7 +1674,7 @@ func (_c *DataCoordCatalog_ListSegments_Call) Return(_a0 []*datapb.SegmentInfo, return _c } -func (_c *DataCoordCatalog_ListSegments_Call) RunAndReturn(run func(context.Context) ([]*datapb.SegmentInfo, error)) *DataCoordCatalog_ListSegments_Call { +func (_c *DataCoordCatalog_ListSegments_Call) RunAndReturn(run func(context.Context, int64) ([]*datapb.SegmentInfo, error)) *DataCoordCatalog_ListSegments_Call { _c.Call.Return(run) return _c } diff --git a/internal/metastore/mocks/mock_querycoord_catalog.go b/internal/metastore/mocks/mock_querycoord_catalog.go index c8368bfcbf31a..585d20788e67f 100644 --- a/internal/metastore/mocks/mock_querycoord_catalog.go +++ b/internal/metastore/mocks/mock_querycoord_catalog.go @@ -139,9 +139,9 @@ func (_c *QueryCoordCatalog_GetCollections_Call) RunAndReturn(run func(context.C return _c } -// GetPartitions provides a mock function with given fields: ctx -func (_m *QueryCoordCatalog) GetPartitions(ctx context.Context) (map[int64][]*querypb.PartitionLoadInfo, error) { - ret := _m.Called(ctx) +// GetPartitions provides a mock function with given fields: ctx, collectionIDs +func (_m *QueryCoordCatalog) GetPartitions(ctx context.Context, collectionIDs []int64) (map[int64][]*querypb.PartitionLoadInfo, error) { + ret := _m.Called(ctx, collectionIDs) if len(ret) == 0 { panic("no return value specified for GetPartitions") @@ -149,19 +149,19 @@ func (_m *QueryCoordCatalog) GetPartitions(ctx context.Context) (map[int64][]*qu var r0 map[int64][]*querypb.PartitionLoadInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (map[int64][]*querypb.PartitionLoadInfo, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, []int64) (map[int64][]*querypb.PartitionLoadInfo, error)); ok { + return rf(ctx, collectionIDs) } - if rf, ok := ret.Get(0).(func(context.Context) map[int64][]*querypb.PartitionLoadInfo); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, []int64) map[int64][]*querypb.PartitionLoadInfo); ok { + r0 = rf(ctx, collectionIDs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(map[int64][]*querypb.PartitionLoadInfo) } } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func(context.Context, []int64) error); 
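// Test-side effect of the regenerated catalog mocks: expectations must now match the
// extra argument. A minimal sketch, assuming the mockery-generated constructors
// mocks.NewDataCoordCatalog and mocks.NewQueryCoordCatalog (the same pattern as the
// NewRootCoordCatalog constructor referenced at the end of this diff) and the usual
// context/testing/mock/querypb imports.
func TestSketch_UpdatedMockSignatures(t *testing.T) {
	dc := mocks.NewDataCoordCatalog(t)
	dc.EXPECT().ListSegments(mock.Anything, mock.Anything).Return(nil, nil)
	if _, err := dc.ListSegments(context.Background(), 1); err != nil {
		t.Fatal(err)
	}

	qc := mocks.NewQueryCoordCatalog(t)
	qc.EXPECT().GetPartitions(mock.Anything, []int64{1}).
		Return(map[int64][]*querypb.PartitionLoadInfo{1: {}}, nil)
	if _, err := qc.GetPartitions(context.Background(), []int64{1}); err != nil {
		t.Fatal(err)
	}
}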
ok { + r1 = rf(ctx, collectionIDs) } else { r1 = ret.Error(1) } @@ -176,13 +176,14 @@ type QueryCoordCatalog_GetPartitions_Call struct { // GetPartitions is a helper method to define mock.On call // - ctx context.Context -func (_e *QueryCoordCatalog_Expecter) GetPartitions(ctx interface{}) *QueryCoordCatalog_GetPartitions_Call { - return &QueryCoordCatalog_GetPartitions_Call{Call: _e.mock.On("GetPartitions", ctx)} +// - collectionIDs []int64 +func (_e *QueryCoordCatalog_Expecter) GetPartitions(ctx interface{}, collectionIDs interface{}) *QueryCoordCatalog_GetPartitions_Call { + return &QueryCoordCatalog_GetPartitions_Call{Call: _e.mock.On("GetPartitions", ctx, collectionIDs)} } -func (_c *QueryCoordCatalog_GetPartitions_Call) Run(run func(ctx context.Context)) *QueryCoordCatalog_GetPartitions_Call { +func (_c *QueryCoordCatalog_GetPartitions_Call) Run(run func(ctx context.Context, collectionIDs []int64)) *QueryCoordCatalog_GetPartitions_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), args[1].([]int64)) }) return _c } @@ -192,7 +193,7 @@ func (_c *QueryCoordCatalog_GetPartitions_Call) Return(_a0 map[int64][]*querypb. return _c } -func (_c *QueryCoordCatalog_GetPartitions_Call) RunAndReturn(run func(context.Context) (map[int64][]*querypb.PartitionLoadInfo, error)) *QueryCoordCatalog_GetPartitions_Call { +func (_c *QueryCoordCatalog_GetPartitions_Call) RunAndReturn(run func(context.Context, []int64) (map[int64][]*querypb.PartitionLoadInfo, error)) *QueryCoordCatalog_GetPartitions_Call { _c.Call.Return(run) return _c } diff --git a/internal/metastore/mocks/mock_rootcoord_catalog.go b/internal/metastore/mocks/mock_rootcoord_catalog.go index 8c35d288c1143..e55a50c5468db 100644 --- a/internal/metastore/mocks/mock_rootcoord_catalog.go +++ b/internal/metastore/mocks/mock_rootcoord_catalog.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.4. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
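// Behavioural note on the mockery v2.46 regeneration: every generated method now begins
// with the guard that appears repeatedly below,
//
//	if len(ret) == 0 {
//		panic("no return value specified for <Method>")
//	}
//
// so calling a mocked method without a matching On(...) or EXPECT().Return(...) setup
// panics with an explicit message instead of silently handing back zero values. Tests
// therefore need a configured return for every call they trigger, even on error paths.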
package mocks @@ -30,6 +30,10 @@ func (_m *RootCoordCatalog) EXPECT() *RootCoordCatalog_Expecter { func (_m *RootCoordCatalog) AlterAlias(ctx context.Context, alias *model.Alias, ts uint64) error { ret := _m.Called(ctx, alias, ts) + if len(ret) == 0 { + panic("no return value specified for AlterAlias") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Alias, uint64) error); ok { r0 = rf(ctx, alias, ts) @@ -74,6 +78,10 @@ func (_c *RootCoordCatalog_AlterAlias_Call) RunAndReturn(run func(context.Contex func (_m *RootCoordCatalog) AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType metastore.AlterType, ts uint64) error { ret := _m.Called(ctx, oldColl, newColl, alterType, ts) + if len(ret) == 0 { + panic("no return value specified for AlterCollection") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Collection, *model.Collection, metastore.AlterType, uint64) error); ok { r0 = rf(ctx, oldColl, newColl, alterType, ts) @@ -120,6 +128,10 @@ func (_c *RootCoordCatalog_AlterCollection_Call) RunAndReturn(run func(context.C func (_m *RootCoordCatalog) AlterCredential(ctx context.Context, credential *model.Credential) error { ret := _m.Called(ctx, credential) + if len(ret) == 0 { + panic("no return value specified for AlterCredential") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Credential) error); ok { r0 = rf(ctx, credential) @@ -163,6 +175,10 @@ func (_c *RootCoordCatalog_AlterCredential_Call) RunAndReturn(run func(context.C func (_m *RootCoordCatalog) AlterDatabase(ctx context.Context, newDB *model.Database, ts uint64) error { ret := _m.Called(ctx, newDB, ts) + if len(ret) == 0 { + panic("no return value specified for AlterDatabase") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Database, uint64) error); ok { r0 = rf(ctx, newDB, ts) @@ -207,6 +223,10 @@ func (_c *RootCoordCatalog_AlterDatabase_Call) RunAndReturn(run func(context.Con func (_m *RootCoordCatalog) AlterGrant(ctx context.Context, tenant string, entity *milvuspb.GrantEntity, operateType milvuspb.OperatePrivilegeType) error { ret := _m.Called(ctx, tenant, entity, operateType) + if len(ret) == 0 { + panic("no return value specified for AlterGrant") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.GrantEntity, milvuspb.OperatePrivilegeType) error); ok { r0 = rf(ctx, tenant, entity, operateType) @@ -252,6 +272,10 @@ func (_c *RootCoordCatalog_AlterGrant_Call) RunAndReturn(run func(context.Contex func (_m *RootCoordCatalog) AlterPartition(ctx context.Context, dbID int64, oldPart *model.Partition, newPart *model.Partition, alterType metastore.AlterType, ts uint64) error { ret := _m.Called(ctx, dbID, oldPart, newPart, alterType, ts) + if len(ret) == 0 { + panic("no return value specified for AlterPartition") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, *model.Partition, *model.Partition, metastore.AlterType, uint64) error); ok { r0 = rf(ctx, dbID, oldPart, newPart, alterType, ts) @@ -299,6 +323,10 @@ func (_c *RootCoordCatalog_AlterPartition_Call) RunAndReturn(run func(context.Co func (_m *RootCoordCatalog) AlterUserRole(ctx context.Context, tenant string, userEntity *milvuspb.UserEntity, roleEntity *milvuspb.RoleEntity, operateType milvuspb.OperateUserRoleType) error { ret := _m.Called(ctx, tenant, userEntity, roleEntity, operateType) + if len(ret) == 0 { + panic("no return value specified for AlterUserRole") + } + var r0 error if 
rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.UserEntity, *milvuspb.RoleEntity, milvuspb.OperateUserRoleType) error); ok { r0 = rf(ctx, tenant, userEntity, roleEntity, operateType) @@ -345,6 +373,10 @@ func (_c *RootCoordCatalog_AlterUserRole_Call) RunAndReturn(run func(context.Con func (_m *RootCoordCatalog) BackupRBAC(ctx context.Context, tenant string) (*milvuspb.RBACMeta, error) { ret := _m.Called(ctx, tenant) + if len(ret) == 0 { + panic("no return value specified for BackupRBAC") + } + var r0 *milvuspb.RBACMeta var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*milvuspb.RBACMeta, error)); ok { @@ -432,6 +464,10 @@ func (_c *RootCoordCatalog_Close_Call) RunAndReturn(run func()) *RootCoordCatalo func (_m *RootCoordCatalog) CollectionExists(ctx context.Context, dbID int64, collectionID int64, ts uint64) bool { ret := _m.Called(ctx, dbID, collectionID, ts) + if len(ret) == 0 { + panic("no return value specified for CollectionExists") + } + var r0 bool if rf, ok := ret.Get(0).(func(context.Context, int64, int64, uint64) bool); ok { r0 = rf(ctx, dbID, collectionID, ts) @@ -477,6 +513,10 @@ func (_c *RootCoordCatalog_CollectionExists_Call) RunAndReturn(run func(context. func (_m *RootCoordCatalog) CreateAlias(ctx context.Context, alias *model.Alias, ts uint64) error { ret := _m.Called(ctx, alias, ts) + if len(ret) == 0 { + panic("no return value specified for CreateAlias") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Alias, uint64) error); ok { r0 = rf(ctx, alias, ts) @@ -521,6 +561,10 @@ func (_c *RootCoordCatalog_CreateAlias_Call) RunAndReturn(run func(context.Conte func (_m *RootCoordCatalog) CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts uint64) error { ret := _m.Called(ctx, collectionInfo, ts) + if len(ret) == 0 { + panic("no return value specified for CreateCollection") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Collection, uint64) error); ok { r0 = rf(ctx, collectionInfo, ts) @@ -565,6 +609,10 @@ func (_c *RootCoordCatalog_CreateCollection_Call) RunAndReturn(run func(context. func (_m *RootCoordCatalog) CreateCredential(ctx context.Context, credential *model.Credential) error { ret := _m.Called(ctx, credential) + if len(ret) == 0 { + panic("no return value specified for CreateCredential") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Credential) error); ok { r0 = rf(ctx, credential) @@ -608,6 +656,10 @@ func (_c *RootCoordCatalog_CreateCredential_Call) RunAndReturn(run func(context. 
func (_m *RootCoordCatalog) CreateDatabase(ctx context.Context, db *model.Database, ts uint64) error { ret := _m.Called(ctx, db, ts) + if len(ret) == 0 { + panic("no return value specified for CreateDatabase") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Database, uint64) error); ok { r0 = rf(ctx, db, ts) @@ -652,6 +704,10 @@ func (_c *RootCoordCatalog_CreateDatabase_Call) RunAndReturn(run func(context.Co func (_m *RootCoordCatalog) CreatePartition(ctx context.Context, dbID int64, partition *model.Partition, ts uint64) error { ret := _m.Called(ctx, dbID, partition, ts) + if len(ret) == 0 { + panic("no return value specified for CreatePartition") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, *model.Partition, uint64) error); ok { r0 = rf(ctx, dbID, partition, ts) @@ -697,6 +753,10 @@ func (_c *RootCoordCatalog_CreatePartition_Call) RunAndReturn(run func(context.C func (_m *RootCoordCatalog) CreateRole(ctx context.Context, tenant string, entity *milvuspb.RoleEntity) error { ret := _m.Called(ctx, tenant, entity) + if len(ret) == 0 { + panic("no return value specified for CreateRole") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.RoleEntity) error); ok { r0 = rf(ctx, tenant, entity) @@ -741,6 +801,10 @@ func (_c *RootCoordCatalog_CreateRole_Call) RunAndReturn(run func(context.Contex func (_m *RootCoordCatalog) DeleteGrant(ctx context.Context, tenant string, role *milvuspb.RoleEntity) error { ret := _m.Called(ctx, tenant, role) + if len(ret) == 0 { + panic("no return value specified for DeleteGrant") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.RoleEntity) error); ok { r0 = rf(ctx, tenant, role) @@ -785,6 +849,10 @@ func (_c *RootCoordCatalog_DeleteGrant_Call) RunAndReturn(run func(context.Conte func (_m *RootCoordCatalog) DropAlias(ctx context.Context, dbID int64, alias string, ts uint64) error { ret := _m.Called(ctx, dbID, alias, ts) + if len(ret) == 0 { + panic("no return value specified for DropAlias") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, string, uint64) error); ok { r0 = rf(ctx, dbID, alias, ts) @@ -830,6 +898,10 @@ func (_c *RootCoordCatalog_DropAlias_Call) RunAndReturn(run func(context.Context func (_m *RootCoordCatalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts uint64) error { ret := _m.Called(ctx, collectionInfo, ts) + if len(ret) == 0 { + panic("no return value specified for DropCollection") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Collection, uint64) error); ok { r0 = rf(ctx, collectionInfo, ts) @@ -874,6 +946,10 @@ func (_c *RootCoordCatalog_DropCollection_Call) RunAndReturn(run func(context.Co func (_m *RootCoordCatalog) DropCredential(ctx context.Context, username string) error { ret := _m.Called(ctx, username) + if len(ret) == 0 { + panic("no return value specified for DropCredential") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, username) @@ -917,6 +993,10 @@ func (_c *RootCoordCatalog_DropCredential_Call) RunAndReturn(run func(context.Co func (_m *RootCoordCatalog) DropDatabase(ctx context.Context, dbID int64, ts uint64) error { ret := _m.Called(ctx, dbID, ts) + if len(ret) == 0 { + panic("no return value specified for DropDatabase") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, uint64) error); ok { r0 = rf(ctx, dbID, ts) @@ -961,6 +1041,10 @@ func (_c 
*RootCoordCatalog_DropDatabase_Call) RunAndReturn(run func(context.Cont func (_m *RootCoordCatalog) DropPartition(ctx context.Context, dbID int64, collectionID int64, partitionID int64, ts uint64) error { ret := _m.Called(ctx, dbID, collectionID, partitionID, ts) + if len(ret) == 0 { + panic("no return value specified for DropPartition") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, int64, int64, uint64) error); ok { r0 = rf(ctx, dbID, collectionID, partitionID, ts) @@ -1003,10 +1087,61 @@ func (_c *RootCoordCatalog_DropPartition_Call) RunAndReturn(run func(context.Con return _c } +// DropPrivilegeGroup provides a mock function with given fields: ctx, groupName +func (_m *RootCoordCatalog) DropPrivilegeGroup(ctx context.Context, groupName string) error { + ret := _m.Called(ctx, groupName) + + if len(ret) == 0 { + panic("no return value specified for DropPrivilegeGroup") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, groupName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootCoordCatalog_DropPrivilegeGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropPrivilegeGroup' +type RootCoordCatalog_DropPrivilegeGroup_Call struct { + *mock.Call +} + +// DropPrivilegeGroup is a helper method to define mock.On call +// - ctx context.Context +// - groupName string +func (_e *RootCoordCatalog_Expecter) DropPrivilegeGroup(ctx interface{}, groupName interface{}) *RootCoordCatalog_DropPrivilegeGroup_Call { + return &RootCoordCatalog_DropPrivilegeGroup_Call{Call: _e.mock.On("DropPrivilegeGroup", ctx, groupName)} +} + +func (_c *RootCoordCatalog_DropPrivilegeGroup_Call) Run(run func(ctx context.Context, groupName string)) *RootCoordCatalog_DropPrivilegeGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *RootCoordCatalog_DropPrivilegeGroup_Call) Return(_a0 error) *RootCoordCatalog_DropPrivilegeGroup_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootCoordCatalog_DropPrivilegeGroup_Call) RunAndReturn(run func(context.Context, string) error) *RootCoordCatalog_DropPrivilegeGroup_Call { + _c.Call.Return(run) + return _c +} + // DropRole provides a mock function with given fields: ctx, tenant, roleName func (_m *RootCoordCatalog) DropRole(ctx context.Context, tenant string, roleName string) error { ret := _m.Called(ctx, tenant, roleName) + if len(ret) == 0 { + panic("no return value specified for DropRole") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, tenant, roleName) @@ -1051,6 +1186,10 @@ func (_c *RootCoordCatalog_DropRole_Call) RunAndReturn(run func(context.Context, func (_m *RootCoordCatalog) GetCollectionByID(ctx context.Context, dbID int64, ts uint64, collectionID int64) (*model.Collection, error) { ret := _m.Called(ctx, dbID, ts, collectionID) + if len(ret) == 0 { + panic("no return value specified for GetCollectionByID") + } + var r0 *model.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, uint64, int64) (*model.Collection, error)); ok { @@ -1108,6 +1247,10 @@ func (_c *RootCoordCatalog_GetCollectionByID_Call) RunAndReturn(run func(context func (_m *RootCoordCatalog) GetCollectionByName(ctx context.Context, dbID int64, collectionName string, ts uint64) (*model.Collection, error) { ret := _m.Called(ctx, dbID, collectionName, ts) + if len(ret) == 0 { + panic("no return value 
specified for GetCollectionByName") + } + var r0 *model.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, string, uint64) (*model.Collection, error)); ok { @@ -1165,6 +1308,10 @@ func (_c *RootCoordCatalog_GetCollectionByName_Call) RunAndReturn(run func(conte func (_m *RootCoordCatalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) { ret := _m.Called(ctx, username) + if len(ret) == 0 { + panic("no return value specified for GetCredential") + } + var r0 *model.Credential var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*model.Credential, error)); ok { @@ -1216,10 +1363,73 @@ func (_c *RootCoordCatalog_GetCredential_Call) RunAndReturn(run func(context.Con return _c } +// GetPrivilegeGroup provides a mock function with given fields: ctx, groupName +func (_m *RootCoordCatalog) GetPrivilegeGroup(ctx context.Context, groupName string) (*milvuspb.PrivilegeGroupInfo, error) { + ret := _m.Called(ctx, groupName) + + if len(ret) == 0 { + panic("no return value specified for GetPrivilegeGroup") + } + + var r0 *milvuspb.PrivilegeGroupInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*milvuspb.PrivilegeGroupInfo, error)); ok { + return rf(ctx, groupName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *milvuspb.PrivilegeGroupInfo); ok { + r0 = rf(ctx, groupName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*milvuspb.PrivilegeGroupInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, groupName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootCoordCatalog_GetPrivilegeGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPrivilegeGroup' +type RootCoordCatalog_GetPrivilegeGroup_Call struct { + *mock.Call +} + +// GetPrivilegeGroup is a helper method to define mock.On call +// - ctx context.Context +// - groupName string +func (_e *RootCoordCatalog_Expecter) GetPrivilegeGroup(ctx interface{}, groupName interface{}) *RootCoordCatalog_GetPrivilegeGroup_Call { + return &RootCoordCatalog_GetPrivilegeGroup_Call{Call: _e.mock.On("GetPrivilegeGroup", ctx, groupName)} +} + +func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) Run(run func(ctx context.Context, groupName string)) *RootCoordCatalog_GetPrivilegeGroup_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) Return(_a0 *milvuspb.PrivilegeGroupInfo, _a1 error) *RootCoordCatalog_GetPrivilegeGroup_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) RunAndReturn(run func(context.Context, string) (*milvuspb.PrivilegeGroupInfo, error)) *RootCoordCatalog_GetPrivilegeGroup_Call { + _c.Call.Return(run) + return _c +} + // ListAliases provides a mock function with given fields: ctx, dbID, ts func (_m *RootCoordCatalog) ListAliases(ctx context.Context, dbID int64, ts uint64) ([]*model.Alias, error) { ret := _m.Called(ctx, dbID, ts) + if len(ret) == 0 { + panic("no return value specified for ListAliases") + } + var r0 []*model.Alias var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, uint64) ([]*model.Alias, error)); ok { @@ -1276,6 +1486,10 @@ func (_c *RootCoordCatalog_ListAliases_Call) RunAndReturn(run func(context.Conte func (_m *RootCoordCatalog) ListCollections(ctx context.Context, dbID int64, ts uint64) ([]*model.Collection, error) { ret := 
_m.Called(ctx, dbID, ts) + if len(ret) == 0 { + panic("no return value specified for ListCollections") + } + var r0 []*model.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, uint64) ([]*model.Collection, error)); ok { @@ -1332,6 +1546,10 @@ func (_c *RootCoordCatalog_ListCollections_Call) RunAndReturn(run func(context.C func (_m *RootCoordCatalog) ListCredentials(ctx context.Context) ([]string, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ListCredentials") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { @@ -1386,6 +1604,10 @@ func (_c *RootCoordCatalog_ListCredentials_Call) RunAndReturn(run func(context.C func (_m *RootCoordCatalog) ListCredentialsWithPasswd(ctx context.Context) (map[string]string, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ListCredentialsWithPasswd") + } + var r0 map[string]string var r1 error if rf, ok := ret.Get(0).(func(context.Context) (map[string]string, error)); ok { @@ -1440,6 +1662,10 @@ func (_c *RootCoordCatalog_ListCredentialsWithPasswd_Call) RunAndReturn(run func func (_m *RootCoordCatalog) ListDatabases(ctx context.Context, ts uint64) ([]*model.Database, error) { ret := _m.Called(ctx, ts) + if len(ret) == 0 { + panic("no return value specified for ListDatabases") + } + var r0 []*model.Database var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]*model.Database, error)); ok { @@ -1495,6 +1721,10 @@ func (_c *RootCoordCatalog_ListDatabases_Call) RunAndReturn(run func(context.Con func (_m *RootCoordCatalog) ListGrant(ctx context.Context, tenant string, entity *milvuspb.GrantEntity) ([]*milvuspb.GrantEntity, error) { ret := _m.Called(ctx, tenant, entity) + if len(ret) == 0 { + panic("no return value specified for ListGrant") + } + var r0 []*milvuspb.GrantEntity var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.GrantEntity) ([]*milvuspb.GrantEntity, error)); ok { @@ -1551,6 +1781,10 @@ func (_c *RootCoordCatalog_ListGrant_Call) RunAndReturn(run func(context.Context func (_m *RootCoordCatalog) ListPolicy(ctx context.Context, tenant string) ([]string, error) { ret := _m.Called(ctx, tenant) + if len(ret) == 0 { + panic("no return value specified for ListPolicy") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { @@ -1602,10 +1836,72 @@ func (_c *RootCoordCatalog_ListPolicy_Call) RunAndReturn(run func(context.Contex return _c } +// ListPrivilegeGroups provides a mock function with given fields: ctx +func (_m *RootCoordCatalog) ListPrivilegeGroups(ctx context.Context) ([]*milvuspb.PrivilegeGroupInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ListPrivilegeGroups") + } + + var r0 []*milvuspb.PrivilegeGroupInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]*milvuspb.PrivilegeGroupInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []*milvuspb.PrivilegeGroupInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*milvuspb.PrivilegeGroupInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootCoordCatalog_ListPrivilegeGroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPrivilegeGroups' +type 
RootCoordCatalog_ListPrivilegeGroups_Call struct { + *mock.Call +} + +// ListPrivilegeGroups is a helper method to define mock.On call +// - ctx context.Context +func (_e *RootCoordCatalog_Expecter) ListPrivilegeGroups(ctx interface{}) *RootCoordCatalog_ListPrivilegeGroups_Call { + return &RootCoordCatalog_ListPrivilegeGroups_Call{Call: _e.mock.On("ListPrivilegeGroups", ctx)} +} + +func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) Run(run func(ctx context.Context)) *RootCoordCatalog_ListPrivilegeGroups_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) Return(_a0 []*milvuspb.PrivilegeGroupInfo, _a1 error) *RootCoordCatalog_ListPrivilegeGroups_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) RunAndReturn(run func(context.Context) ([]*milvuspb.PrivilegeGroupInfo, error)) *RootCoordCatalog_ListPrivilegeGroups_Call { + _c.Call.Return(run) + return _c +} + // ListRole provides a mock function with given fields: ctx, tenant, entity, includeUserInfo func (_m *RootCoordCatalog) ListRole(ctx context.Context, tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) { ret := _m.Called(ctx, tenant, entity, includeUserInfo) + if len(ret) == 0 { + panic("no return value specified for ListRole") + } + var r0 []*milvuspb.RoleResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.RoleEntity, bool) ([]*milvuspb.RoleResult, error)); ok { @@ -1663,6 +1959,10 @@ func (_c *RootCoordCatalog_ListRole_Call) RunAndReturn(run func(context.Context, func (_m *RootCoordCatalog) ListUser(ctx context.Context, tenant string, entity *milvuspb.UserEntity, includeRoleInfo bool) ([]*milvuspb.UserResult, error) { ret := _m.Called(ctx, tenant, entity, includeRoleInfo) + if len(ret) == 0 { + panic("no return value specified for ListUser") + } + var r0 []*milvuspb.UserResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.UserEntity, bool) ([]*milvuspb.UserResult, error)); ok { @@ -1720,6 +2020,10 @@ func (_c *RootCoordCatalog_ListUser_Call) RunAndReturn(run func(context.Context, func (_m *RootCoordCatalog) ListUserRole(ctx context.Context, tenant string) ([]string, error) { ret := _m.Called(ctx, tenant) + if len(ret) == 0 { + panic("no return value specified for ListUserRole") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { @@ -1775,6 +2079,10 @@ func (_c *RootCoordCatalog_ListUserRole_Call) RunAndReturn(run func(context.Cont func (_m *RootCoordCatalog) RestoreRBAC(ctx context.Context, tenant string, meta *milvuspb.RBACMeta) error { ret := _m.Called(ctx, tenant, meta) + if len(ret) == 0 { + panic("no return value specified for RestoreRBAC") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, *milvuspb.RBACMeta) error); ok { r0 = rf(ctx, tenant, meta) @@ -1815,122 +2123,14 @@ func (_c *RootCoordCatalog_RestoreRBAC_Call) RunAndReturn(run func(context.Conte return _c } -// NewRootCoordCatalog creates a new instance of RootCoordCatalog. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewRootCoordCatalog(t interface { - mock.TestingT - Cleanup(func()) -}) *RootCoordCatalog { - mock := &RootCoordCatalog{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} - -// GetPrivilegeGroup provides a mock function with given fields: ctx, groupName -func (_m *RootCoordCatalog) GetPrivilegeGroup(ctx context.Context, groupName string) (*milvuspb.PrivilegeGroupInfo, error) { - ret := _m.Called(ctx, groupName) - - var r0 *milvuspb.PrivilegeGroupInfo - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*milvuspb.PrivilegeGroupInfo, error)); ok { - return rf(ctx, groupName) - } - if rf, ok := ret.Get(0).(func(context.Context, string) *milvuspb.PrivilegeGroupInfo); ok { - r0 = rf(ctx, groupName) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*milvuspb.PrivilegeGroupInfo) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, groupName) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RootCoordCatalog_GetPrivilegeGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPrivilegeGroup' -type RootCoordCatalog_GetPrivilegeGroup_Call struct { - *mock.Call -} - -// GetPrivilegeGroup is a helper method to define mock.On call -// - ctx context.Context -// - groupName string -func (_e *RootCoordCatalog_Expecter) GetPrivilegeGroup(ctx interface{}, groupName interface{}) *RootCoordCatalog_GetPrivilegeGroup_Call { - return &RootCoordCatalog_GetPrivilegeGroup_Call{Call: _e.mock.On("GetPrivilegeGroup", ctx, groupName)} -} - -func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) Run(run func(ctx context.Context, groupName string)) *RootCoordCatalog_GetPrivilegeGroup_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) - }) - return _c -} - -func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) Return(_a0 *milvuspb.PrivilegeGroupInfo, _a1 error) *RootCoordCatalog_GetPrivilegeGroup_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *RootCoordCatalog_GetPrivilegeGroup_Call) RunAndReturn(run func(context.Context, string) (*milvuspb.PrivilegeGroupInfo, error)) *RootCoordCatalog_GetPrivilegeGroup_Call { - _c.Call.Return(run) - return _c -} - -// DropPrivilegeGroup provides a mock function with given fields: ctx, groupName, privileges -func (_m *RootCoordCatalog) DropPrivilegeGroup(ctx context.Context, groupName string) error { - ret := _m.Called(ctx, groupName) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, groupName) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RootCoordCatalog_DropPrivilegeGroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropPrivilegeGroup' -type RootCoordCatalog_DropPrivilegeGroup_Call struct { - *mock.Call -} - -// DropPrivilegeGroup is a helper method to define mock.On call -// - ctx context.Context -// - groupName string -func (_e *RootCoordCatalog_Expecter) DropPrivilegeGroup(ctx interface{}, groupName interface{}) *RootCoordCatalog_DropPrivilegeGroup_Call { - return &RootCoordCatalog_DropPrivilegeGroup_Call{Call: _e.mock.On("DropPrivilegeGroup", ctx, groupName)} -} - -func (_c *RootCoordCatalog_DropPrivilegeGroup_Call) Run(run func(ctx context.Context, groupName string)) *RootCoordCatalog_DropPrivilegeGroup_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) - }) - return _c -} - -func (_c 
*RootCoordCatalog_DropPrivilegeGroup_Call) Return(_a0 error) *RootCoordCatalog_DropPrivilegeGroup_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *RootCoordCatalog_DropPrivilegeGroup_Call) RunAndReturn(run func(context.Context, string) error) *RootCoordCatalog_DropPrivilegeGroup_Call { - _c.Call.Return(run) - return _c -} - -// SavePrivilegeGroup provides a mock function with given fields: ctx, groupName, privileges +// SavePrivilegeGroup provides a mock function with given fields: ctx, data func (_m *RootCoordCatalog) SavePrivilegeGroup(ctx context.Context, data *milvuspb.PrivilegeGroupInfo) error { ret := _m.Called(ctx, data) + if len(ret) == 0 { + panic("no return value specified for SavePrivilegeGroup") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *milvuspb.PrivilegeGroupInfo) error); ok { r0 = rf(ctx, data) @@ -1948,7 +2148,7 @@ type RootCoordCatalog_SavePrivilegeGroup_Call struct { // SavePrivilegeGroup is a helper method to define mock.On call // - ctx context.Context -// - groupName string +// - data *milvuspb.PrivilegeGroupInfo func (_e *RootCoordCatalog_Expecter) SavePrivilegeGroup(ctx interface{}, data interface{}) *RootCoordCatalog_SavePrivilegeGroup_Call { return &RootCoordCatalog_SavePrivilegeGroup_Call{Call: _e.mock.On("SavePrivilegeGroup", ctx, data)} } @@ -1970,56 +2170,16 @@ func (_c *RootCoordCatalog_SavePrivilegeGroup_Call) RunAndReturn(run func(contex return _c } -// ListPrivilegeGroups provides a mock function with given fields: ctx -func (_m *RootCoordCatalog) ListPrivilegeGroups(ctx context.Context) ([]*milvuspb.PrivilegeGroupInfo, error) { - ret := _m.Called(ctx) - - var r0 []*milvuspb.PrivilegeGroupInfo - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]*milvuspb.PrivilegeGroupInfo, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) []*milvuspb.PrivilegeGroupInfo); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*milvuspb.PrivilegeGroupInfo) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RootCoordCatalog_ListPrivilegeGroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPrivilegeGroups' -type RootCoordCatalog_ListPrivilegeGroups_Call struct { - *mock.Call -} - -// ListPrivilegeGroups is a helper method to define mock.On call -// - ctx context.Context -func (_e *RootCoordCatalog_Expecter) ListPrivilegeGroups(ctx interface{}) *RootCoordCatalog_ListPrivilegeGroups_Call { - return &RootCoordCatalog_ListPrivilegeGroups_Call{Call: _e.mock.On("ListPrivilegeGroups", ctx)} -} - -func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) Run(run func(ctx context.Context)) *RootCoordCatalog_ListPrivilegeGroups_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} +// NewRootCoordCatalog creates a new instance of RootCoordCatalog. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRootCoordCatalog(t interface { + mock.TestingT + Cleanup(func()) +}) *RootCoordCatalog { + mock := &RootCoordCatalog{} + mock.Mock.Test(t) -func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) Return(_a0 []*milvuspb.PrivilegeGroupInfo, _a1 error) *RootCoordCatalog_ListPrivilegeGroups_Call { - _c.Call.Return(_a0, _a1) - return _c -} + t.Cleanup(func() { mock.AssertExpectations(t) }) -func (_c *RootCoordCatalog_ListPrivilegeGroups_Call) RunAndReturn(run func(context.Context) ([]*milvuspb.PrivilegeGroupInfo, error)) *RootCoordCatalog_ListPrivilegeGroups_Call { - _c.Call.Return(run) - return _c + return mock } diff --git a/internal/mocks/mock_rootcoord.go b/internal/mocks/mock_rootcoord.go index 0ecf1b9ca01fc..6d80a7cb3f2d0 100644 --- a/internal/mocks/mock_rootcoord.go +++ b/internal/mocks/mock_rootcoord.go @@ -3241,6 +3241,65 @@ func (_c *RootCoord_ShowCollections_Call) RunAndReturn(run func(context.Context, return _c } +// ShowCollectionsInternal provides a mock function with given fields: _a0, _a1 +func (_m *RootCoord) ShowCollectionsInternal(_a0 context.Context, _a1 *rootcoordpb.ShowCollectionsInternalRequest) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for ShowCollectionsInternal") + } + + var r0 *rootcoordpb.ShowCollectionsInternalResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest) (*rootcoordpb.ShowCollectionsInternalResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest) *rootcoordpb.ShowCollectionsInternalResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rootcoordpb.ShowCollectionsInternalResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootCoord_ShowCollectionsInternal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShowCollectionsInternal' +type RootCoord_ShowCollectionsInternal_Call struct { + *mock.Call +} + +// ShowCollectionsInternal is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *rootcoordpb.ShowCollectionsInternalRequest +func (_e *RootCoord_Expecter) ShowCollectionsInternal(_a0 interface{}, _a1 interface{}) *RootCoord_ShowCollectionsInternal_Call { + return &RootCoord_ShowCollectionsInternal_Call{Call: _e.mock.On("ShowCollectionsInternal", _a0, _a1)} +} + +func (_c *RootCoord_ShowCollectionsInternal_Call) Run(run func(_a0 context.Context, _a1 *rootcoordpb.ShowCollectionsInternalRequest)) *RootCoord_ShowCollectionsInternal_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*rootcoordpb.ShowCollectionsInternalRequest)) + }) + return _c +} + +func (_c *RootCoord_ShowCollectionsInternal_Call) Return(_a0 *rootcoordpb.ShowCollectionsInternalResponse, _a1 error) *RootCoord_ShowCollectionsInternal_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootCoord_ShowCollectionsInternal_Call) RunAndReturn(run func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest) (*rootcoordpb.ShowCollectionsInternalResponse, error)) *RootCoord_ShowCollectionsInternal_Call { + _c.Call.Return(run) + return _c +} + // ShowConfigurations provides a mock function with given fields: _a0, _a1 func (_m *RootCoord) 
ShowConfigurations(_a0 context.Context, _a1 *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) { ret := _m.Called(_a0, _a1) diff --git a/internal/mocks/mock_rootcoord_client.go b/internal/mocks/mock_rootcoord_client.go index 6542d4ea64545..ae00637d50e71 100644 --- a/internal/mocks/mock_rootcoord_client.go +++ b/internal/mocks/mock_rootcoord_client.go @@ -3704,6 +3704,80 @@ func (_c *MockRootCoordClient_ShowCollections_Call) RunAndReturn(run func(contex return _c } +// ShowCollectionsInternal provides a mock function with given fields: ctx, in, opts +func (_m *MockRootCoordClient) ShowCollectionsInternal(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ShowCollectionsInternal") + } + + var r0 *rootcoordpb.ShowCollectionsInternalResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest, ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest, ...grpc.CallOption) *rootcoordpb.ShowCollectionsInternalResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rootcoordpb.ShowCollectionsInternalResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRootCoordClient_ShowCollectionsInternal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShowCollectionsInternal' +type MockRootCoordClient_ShowCollectionsInternal_Call struct { + *mock.Call +} + +// ShowCollectionsInternal is a helper method to define mock.On call +// - ctx context.Context +// - in *rootcoordpb.ShowCollectionsInternalRequest +// - opts ...grpc.CallOption +func (_e *MockRootCoordClient_Expecter) ShowCollectionsInternal(ctx interface{}, in interface{}, opts ...interface{}) *MockRootCoordClient_ShowCollectionsInternal_Call { + return &MockRootCoordClient_ShowCollectionsInternal_Call{Call: _e.mock.On("ShowCollectionsInternal", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *MockRootCoordClient_ShowCollectionsInternal_Call) Run(run func(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption)) *MockRootCoordClient_ShowCollectionsInternal_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*rootcoordpb.ShowCollectionsInternalRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockRootCoordClient_ShowCollectionsInternal_Call) Return(_a0 *rootcoordpb.ShowCollectionsInternalResponse, _a1 error) *MockRootCoordClient_ShowCollectionsInternal_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRootCoordClient_ShowCollectionsInternal_Call) RunAndReturn(run func(context.Context, *rootcoordpb.ShowCollectionsInternalRequest, ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error)) *MockRootCoordClient_ShowCollectionsInternal_Call { + _c.Call.Return(run) + return _c +} + // ShowConfigurations provides a mock function with given fields: ctx, in, opts func (_m *MockRootCoordClient) ShowConfigurations(ctx context.Context, in *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error) { _va := make([]interface{}, len(opts)) diff --git a/internal/proto/root_coord.proto b/internal/proto/root_coord.proto index 3efe6fe7340b0..953780c8b27cc 100644 --- a/internal/proto/root_coord.proto +++ b/internal/proto/root_coord.proto @@ -63,6 +63,8 @@ service RootCoord { */ rpc ShowCollections(milvus.ShowCollectionsRequest) returns (milvus.ShowCollectionsResponse) {} + rpc ShowCollectionsInternal(ShowCollectionsInternalRequest) returns (ShowCollectionsInternalResponse) {} + rpc AlterCollection(milvus.AlterCollectionRequest) returns (common.Status) {} rpc AlterCollectionField(milvus.AlterCollectionFieldRequest) returns (common.Status) {} @@ -245,4 +247,19 @@ message CollectionInfoOnPChannel { message PartitionInfoOnPChannel { int64 partition_id = 1; -} \ No newline at end of file +} + +message ShowCollectionsInternalRequest { + common.MsgBase base = 1; + repeated string db_names = 2; +} + +message DBCollections { + string db_name = 1; + repeated int64 collectionIDs = 2; +} + +message ShowCollectionsInternalResponse { + common.Status status = 1; + repeated DBCollections db_collections = 2; +} diff --git a/internal/proxy/rootcoord_mock_test.go b/internal/proxy/rootcoord_mock_test.go index 513c9412007dc..62d4238483e94 100644 --- a/internal/proxy/rootcoord_mock_test.go +++ b/internal/proxy/rootcoord_mock_test.go @@ -576,6 +576,10 @@ func (coord *RootCoordMock) ShowCollections(ctx context.Context, req *milvuspb.S }, nil } +func (coord *RootCoordMock) ShowCollectionsInternal(ctx context.Context, req *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + panic("implements me") +} + func (coord *RootCoordMock) CreatePartition(ctx context.Context, req *milvuspb.CreatePartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { code := coord.state.Load().(commonpb.StateCode) if code != commonpb.StateCode_Healthy { diff --git a/internal/querycoordv2/meta/collection_manager.go b/internal/querycoordv2/meta/collection_manager.go index 8bef5b7f5a33f..f1d6c61363c54 100644 --- a/internal/querycoordv2/meta/collection_manager.go +++ b/internal/querycoordv2/meta/collection_manager.go @@ -123,18 +123,24 @@ func NewCollectionManager(catalog metastore.QueryCoordCatalog) *CollectionManage // Recover recovers collections from kv store, // panics if failed func (m *CollectionManager) Recover(ctx context.Context, broker Broker) error { + start := time.Now() collections, err := m.catalog.GetCollections(ctx) if err != nil { return err } - partitions, err := m.catalog.GetPartitions(ctx) + log.Ctx(ctx).Info("recover collections from kv store", zap.Duration("dur", time.Since(start))) + + start = time.Now() + partitions, err := 
m.catalog.GetPartitions(ctx, lo.Map(collections, func(collection *querypb.CollectionLoadInfo, _ int) int64 { + return collection.GetCollectionID() + })) if err != nil { return err } ctx = log.WithTraceID(ctx, strconv.FormatInt(time.Now().UnixNano(), 10)) ctxLog := log.Ctx(ctx) - ctxLog.Info("recover collections and partitions from kv store") + ctxLog.Info("recover partitions from kv store", zap.Duration("dur", time.Since(start))) for _, collection := range collections { if collection.GetReplicaNumber() <= 0 { diff --git a/internal/rootcoord/meta_table.go b/internal/rootcoord/meta_table.go index 14d6e5a135262..c8817234393b4 100644 --- a/internal/rootcoord/meta_table.go +++ b/internal/rootcoord/meta_table.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "sync" + "time" "github.com/cockroachdb/errors" "github.com/samber/lo" @@ -190,6 +191,9 @@ func (mt *MetaTable) reload() error { collectionNum := int64(0) mt.names.createDbIfNotExist(dbName) + + start := time.Now() + // TODO: async list collections to accelerate cases with multiple databases. collections, err := mt.catalog.ListCollections(mt.ctx, db.ID, typeutil.MaxTimestamp) if err != nil { return err @@ -209,7 +213,8 @@ func (mt *MetaTable) reload() error { metrics.RootCoordNumOfPartitions.WithLabelValues().Add(float64(partitionNum)) log.Info("collections recovered from db", zap.String("db_name", dbName), zap.Int64("collection_num", collectionNum), - zap.Int64("partition_num", partitionNum)) + zap.Int64("partition_num", partitionNum), + zap.Duration("dur", time.Since(start))) } // recover aliases from db namespace diff --git a/internal/rootcoord/meta_table_test.go b/internal/rootcoord/meta_table_test.go index 3503b2f27de3d..8ecb6b6cbc1c4 100644 --- a/internal/rootcoord/meta_table_test.go +++ b/internal/rootcoord/meta_table_test.go @@ -43,7 +43,7 @@ import ( ) func generateMetaTable(t *testing.T) *MetaTable { - return &MetaTable{catalog: &rootcoord.Catalog{Txn: memkv.NewMemoryKV()}} + return &MetaTable{catalog: rootcoord.NewCatalog(memkv.NewMemoryKV(), nil)} } func TestRbacAddCredential(t *testing.T) { diff --git a/internal/rootcoord/root_coord.go b/internal/rootcoord/root_coord.go index 25e031b39e884..7bd645e0a9c43 100644 --- a/internal/rootcoord/root_coord.go +++ b/internal/rootcoord/root_coord.go @@ -356,7 +356,7 @@ func (c *Core) initMetaTable() error { if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, kvmetestore.SnapshotsSep, Params.EtcdCfg.MetaRootPath.GetValue(), kvmetestore.SnapshotPrefix); err != nil { return err } - catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} + catalog = kvmetestore.NewCatalog(metaKV, ss) case util.MetaStoreTypeTiKV: log.Info("Using tikv as meta storage.") var metaKV kv.MetaKv @@ -370,7 +370,7 @@ func (c *Core) initMetaTable() error { if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, kvmetestore.SnapshotsSep, Params.TiKVCfg.MetaRootPath.GetValue(), kvmetestore.SnapshotPrefix); err != nil { return err } - catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} + catalog = kvmetestore.NewCatalog(metaKV, ss) default: return retry.Unrecoverable(fmt.Errorf("not supported meta store: %s", Params.MetaStoreCfg.MetaStoreType.GetValue())) } @@ -1345,6 +1345,73 @@ func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollections return t.Rsp, nil } +// ShowCollectionsInternal returns all collections, including unhealthy ones. 
+func (c *Core) ShowCollectionsInternal(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + if err := merr.CheckHealthy(c.GetStateCode()); err != nil { + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Status(err), + }, nil + } + + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollectionsInternal", metrics.TotalLabel).Inc() + tr := timerecord.NewTimeRecorder("ShowCollectionsInternal") + + ts := typeutil.MaxTimestamp + log := log.Ctx(ctx).With(zap.Strings("dbNames", in.GetDbNames())) + + // Currently, this interface is only called during startup, so there is no need to execute it within the scheduler. + var err error + var dbs []*model.Database + if len(in.GetDbNames()) == 0 { + // show all collections + dbs, err = c.meta.ListDatabases(ctx, ts) + if err != nil { + log.Info("failed to ListDatabases", zap.Error(err)) + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollectionsInternal", metrics.FailLabel).Inc() + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Status(err), + }, nil + } + } else { + dbs = make([]*model.Database, 0, len(in.GetDbNames())) + for _, name := range in.GetDbNames() { + db, err := c.meta.GetDatabaseByName(ctx, name, ts) + if err != nil { + log.Info("failed to GetDatabaseByName", zap.Error(err)) + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollectionsInternal", metrics.FailLabel).Inc() + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Status(err), + }, nil + } + dbs = append(dbs, db) + } + } + dbCollections := make([]*rootcoordpb.DBCollections, 0, len(dbs)) + for _, db := range dbs { + collections, err := c.meta.ListCollections(ctx, db.Name, ts, false) + if err != nil { + log.Info("failed to ListCollections", zap.Error(err)) + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollectionsInternal", metrics.FailLabel).Inc() + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Status(err), + }, nil + } + dbCollections = append(dbCollections, &rootcoordpb.DBCollections{ + DbName: db.Name, + CollectionIDs: lo.Map(collections, func(col *model.Collection, _ int) int64 { + return col.CollectionID + }), + }) + } + metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollectionsInternal", metrics.SuccessLabel).Inc() + metrics.RootCoordDDLReqLatency.WithLabelValues("ShowCollectionsInternal").Observe(float64(tr.ElapseSpan().Milliseconds())) + + return &rootcoordpb.ShowCollectionsInternalResponse{ + Status: merr.Success(), + DbCollections: dbCollections, + }, nil +} + func (c *Core) AlterCollection(ctx context.Context, in *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) { if err := merr.CheckHealthy(c.GetStateCode()); err != nil { return merr.Status(err), nil diff --git a/internal/rootcoord/root_coord_test.go b/internal/rootcoord/root_coord_test.go index b8915e5deab79..44830631bca1d 100644 --- a/internal/rootcoord/root_coord_test.go +++ b/internal/rootcoord/root_coord_test.go @@ -730,6 +730,72 @@ func TestRootCoord_ShowCollections(t *testing.T) { }) } +func TestRootCoord_ShowCollectionsInternal(t *testing.T) { + t.Run("not healthy", func(t *testing.T) { + c := newTestCore(withAbnormalCode()) + ctx := context.Background() + resp, err := c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("test failed", func(t *testing.T) { + c := 
newTestCore(withHealthyCode()) + meta := mockrootcoord.NewIMetaTable(t) + c.meta = meta + + ctx := context.Background() + + // specify db names + meta.EXPECT().GetDatabaseByName(mock.Anything, mock.Anything, typeutil.MaxTimestamp).Return(nil, fmt.Errorf("mock err")) + resp, err := c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{ + DbNames: []string{"db1"}, + }) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + + // not specify db names + meta.EXPECT().ListDatabases(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock err")) + resp, err = c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + + // list collections failed + meta.ExpectedCalls = nil + meta.EXPECT().ListDatabases(mock.Anything, mock.Anything).Return( + []*model.Database{model.NewDatabase(rand.Int63(), "db1", etcdpb.DatabaseState_DatabaseCreated, nil)}, nil) + meta.EXPECT().ListCollections(mock.Anything, mock.Anything, typeutil.MaxTimestamp, false).Return(nil, fmt.Errorf("mock err")) + resp, err = c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{}) + assert.NoError(t, err) + assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) + + t.Run("normal case, everything is ok", func(t *testing.T) { + c := newTestCore(withHealthyCode()) + meta := mockrootcoord.NewIMetaTable(t) + meta.EXPECT().ListCollections(mock.Anything, mock.Anything, typeutil.MaxTimestamp, false).Return([]*model.Collection{}, nil) + c.meta = meta + + ctx := context.Background() + + // specify db names + meta.EXPECT().GetDatabaseByName(mock.Anything, mock.Anything, typeutil.MaxTimestamp).Return( + model.NewDatabase(rand.Int63(), "db1", etcdpb.DatabaseState_DatabaseCreated, nil), nil) + resp, err := c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{ + DbNames: []string{"db1"}, + }) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + + // not specify db names + meta.EXPECT().ListDatabases(mock.Anything, mock.Anything).Return( + []*model.Database{model.NewDatabase(rand.Int63(), "db1", etcdpb.DatabaseState_DatabaseCreated, nil)}, nil) + resp, err = c.ShowCollectionsInternal(ctx, &rootcoordpb.ShowCollectionsInternalRequest{}) + assert.NoError(t, err) + assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode()) + }) +} + func TestRootCoord_HasPartition(t *testing.T) { t.Run("not healthy", func(t *testing.T) { c := newTestCore(withAbnormalCode()) diff --git a/internal/util/mock/grpc_rootcoord_client.go b/internal/util/mock/grpc_rootcoord_client.go index abf856d0be9c0..c50d7b3cc7597 100644 --- a/internal/util/mock/grpc_rootcoord_client.go +++ b/internal/util/mock/grpc_rootcoord_client.go @@ -158,6 +158,10 @@ func (m *GrpcRootCoordClient) ShowCollections(ctx context.Context, in *milvuspb. 
return &milvuspb.ShowCollectionsResponse{}, m.Err } +func (m *GrpcRootCoordClient) ShowCollectionsInternal(ctx context.Context, in *rootcoordpb.ShowCollectionsInternalRequest, opts ...grpc.CallOption) (*rootcoordpb.ShowCollectionsInternalResponse, error) { + return &rootcoordpb.ShowCollectionsInternalResponse{}, m.Err +} + func (m *GrpcRootCoordClient) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { return &commonpb.Status{}, m.Err } diff --git a/pkg/util/paramtable/service_param.go b/pkg/util/paramtable/service_param.go index 42bff06595e40..0f1babc3aee5b 100644 --- a/pkg/util/paramtable/service_param.go +++ b/pkg/util/paramtable/service_param.go @@ -459,6 +459,7 @@ type MetaStoreConfig struct { SnapshotTTLSeconds ParamItem `refreshable:"true"` SnapshotReserveTimeSeconds ParamItem `refreshable:"true"` PaginationSize ParamItem `refreshable:"true"` + ReadConcurrency ParamItem `refreshable:"true"` } func (p *MetaStoreConfig) Init(base *BaseTable) { @@ -492,11 +493,19 @@ func (p *MetaStoreConfig) Init(base *BaseTable) { p.PaginationSize = ParamItem{ Key: "metastore.paginationSize", Version: "2.5.1", - DefaultValue: "10000", + DefaultValue: "100000", Doc: `limits the number of results to return from metastore.`, } p.PaginationSize.Init(base.mgr) + p.ReadConcurrency = ParamItem{ + Key: "metastore.readConcurrency", + Version: "2.5.1", + DefaultValue: "32", + Doc: `read concurrency for fetching metadata from the metastore.`, + } + p.ReadConcurrency.Init(base.mgr) + // TODO: The initialization operation of metadata storage is called in the initialization phase of every node. // There should be a single initialization operation for meta store, then move the metrics registration to there. metrics.RegisterMetaType(p.MetaStoreType.GetValue()) diff --git a/pkg/util/paramtable/service_param_test.go b/pkg/util/paramtable/service_param_test.go index 404ef45e6d0ea..9a4215f40cb92 100644 --- a/pkg/util/paramtable/service_param_test.go +++ b/pkg/util/paramtable/service_param_test.go @@ -218,6 +218,7 @@ func TestServiceParam(t *testing.T) { assert.Equal(t, util.MetaStoreTypeEtcd, Params.MetaStoreType.GetValue()) assert.Equal(t, 86400*time.Second, Params.SnapshotTTLSeconds.GetAsDuration(time.Second)) assert.Equal(t, 3600*time.Second, Params.SnapshotReserveTimeSeconds.GetAsDuration(time.Second)) - assert.Equal(t, 10000, Params.PaginationSize.GetAsInt()) + assert.Equal(t, 100000, Params.PaginationSize.GetAsInt()) + assert.Equal(t, 32, Params.ReadConcurrency.GetAsInt()) }) }
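Note on the new `metastore.readConcurrency` knob introduced in the hunk above: it caps how many catalog reads may run in parallel while metadata is recovered. The sketch below is illustrative only and is not part of this patch; `fetchPartitionsConcurrently`, `listPartitions`, and the sample IDs are made-up names, and the limit of 32 simply mirrors the parameter's default value.

// Illustrative sketch (not from this patch): bound parallel metastore reads
// with a semaphore sized by a read-concurrency setting such as
// metastore.readConcurrency. listPartitions stands in for a catalog read.
package main

import (
	"context"
	"fmt"
	"sync"
)

func fetchPartitionsConcurrently(
	ctx context.Context,
	collectionIDs []int64,
	concurrency int, // e.g. the value returned by ReadConcurrency.GetAsInt()
	listPartitions func(context.Context, int64) ([]int64, error),
) (map[int64][]int64, error) {
	sem := make(chan struct{}, concurrency) // at most `concurrency` reads in flight
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		firstErr error
		result   = make(map[int64][]int64, len(collectionIDs))
	)
	for _, id := range collectionIDs {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot before spawning the read
		go func(id int64) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			partitions, err := listPartitions(ctx, id)
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				if firstErr == nil {
					firstErr = err
				}
				return
			}
			result[id] = partitions
		}(id)
	}
	wg.Wait()
	return result, firstErr
}

func main() {
	// Fake catalog read; a real caller would query the metastore here.
	fakeRead := func(_ context.Context, id int64) ([]int64, error) {
		return []int64{id*10 + 1, id*10 + 2}, nil
	}
	out, err := fetchPartitionsConcurrently(context.Background(), []int64{1, 2, 3}, 32, fakeRead)
	fmt.Println(out, err)
}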