From c82d902189f3ae80bce843865093b611a0a7d48c Mon Sep 17 00:00:00 2001 From: congqixia Date: Thu, 7 Nov 2024 20:24:18 +0800 Subject: [PATCH] enhance: [backport] Move collection meta check to server side (#835) (#842) Backport from v2.4.x pr: #835 Move collection/partition validation logic to server side in case of frequent check rpc. Signed-off-by: Congqi Xia --- client/collection.go | 30 -- client/collection_test.go | 411 ++++++++++++-------------- client/data.go | 16 - client/data_test.go | 149 +--------- client/insert.go | 36 +-- client/insert_test.go | 119 ++++++++ client/maintainance.go | 3 - client/partition.go | 47 +-- client/partition_test.go | 222 +++++++------- client/row.go | 23 +- client/row_test.go | 54 +--- test/testcases/collection_test.go | 28 +- test/testcases/compact_test.go | 14 +- test/testcases/delete_test.go | 4 +- test/testcases/flush_test.go | 2 +- test/testcases/index_test.go | 2 +- test/testcases/iterator_test.go | 53 ++++ test/testcases/load_release_test.go | 203 ++++++++----- test/testcases/resource_group_test.go | 2 +- test/testcases/ut.log | 109 +++++++ 20 files changed, 757 insertions(+), 770 deletions(-) create mode 100644 test/testcases/iterator_test.go create mode 100644 test/testcases/ut.log diff --git a/client/collection.go b/client/collection.go index 359e2501..65efe504 100644 --- a/client/collection.go +++ b/client/collection.go @@ -301,9 +301,6 @@ func (c *GrpcClient) DropCollection(ctx context.Context, collName string, opts . if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } req := &milvuspb.DropCollectionRequest{ CollectionName: collName, @@ -350,10 +347,6 @@ func (c *GrpcClient) GetCollectionStatistics(ctx context.Context, collName strin return nil, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return nil, err - } - req := &milvuspb.GetCollectionStatisticsRequest{ CollectionName: collName, } @@ -372,9 +365,6 @@ func (c *GrpcClient) ShowCollection(ctx context.Context, collName string) (*enti if c.Service == nil { return nil, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return nil, err - } req := &milvuspb.ShowCollectionsRequest{ Type: milvuspb.ShowType_InMemory, @@ -404,10 +394,6 @@ func (c *GrpcClient) RenameCollection(ctx context.Context, collName, newName str return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - req := &milvuspb.RenameCollectionRequest{ OldName: collName, NewName: newName, @@ -425,10 +411,6 @@ func (c *GrpcClient) LoadCollection(ctx context.Context, collName string, async return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - req := &milvuspb.LoadCollectionRequest{ CollectionName: collName, } @@ -471,9 +453,6 @@ func (c *GrpcClient) ReleaseCollection(ctx context.Context, collName string, opt if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } req := &milvuspb.ReleaseCollectionRequest{ DbName: "", // reserved @@ -537,9 +516,6 @@ func (c *GrpcClient) GetLoadingProgress(ctx context.Context, collName string, pa if c.Service == nil { return 0, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return 0, err - } req := &milvuspb.GetLoadingProgressRequest{ CollectionName: collName, @@ -558,9 +534,6 @@ func (c *GrpcClient) GetLoadState(ctx 
context.Context, collName string, partitio if c.Service == nil { return 0, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return 0, err - } req := &milvuspb.GetLoadStateRequest{ CollectionName: collName, @@ -579,9 +552,6 @@ func (c *GrpcClient) AlterCollection(ctx context.Context, collName string, attrs if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } if len(attrs) == 0 { return errors.New("no collection attribute provided") diff --git a/client/collection_test.go b/client/collection_test.go index 91baba02..7aaffa73 100644 --- a/client/collection_test.go +++ b/client/collection_test.go @@ -417,30 +417,17 @@ func (s *CollectionSuite) TestRenameCollection() { newCollName := fmt.Sprintf("new_%s", randStr(6)) - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}).Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().RenameCollection(mock.Anything, &milvuspb.RenameCollectionRequest{OldName: testCollectionName, NewName: newCollName}).Return(&commonpb.Status{}, nil) err := c.RenameCollection(ctx, testCollectionName, newCollName) s.NoError(err) }) - s.Run("coll_not_exist", func() { - defer s.resetMock() - - newCollName := fmt.Sprintf("new_%s", randStr(6)) - - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}).Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: false}, nil) - - err := c.RenameCollection(ctx, testCollectionName, newCollName) - s.Error(err) - }) - s.Run("rename_failed", func() { defer s.resetMock() newCollName := fmt.Sprintf("new_%s", randStr(6)) - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}).Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().RenameCollection(mock.Anything, &milvuspb.RenameCollectionRequest{OldName: testCollectionName, NewName: newCollName}).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: "mocked failure"}, nil) err := c.RenameCollection(ctx, testCollectionName, newCollName) @@ -452,7 +439,6 @@ func (s *CollectionSuite) TestRenameCollection() { newCollName := fmt.Sprintf("new_%s", randStr(6)) - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}).Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().RenameCollection(mock.Anything, &milvuspb.RenameCollectionRequest{OldName: testCollectionName, NewName: newCollName}).Return(nil, errors.New("mocked error")) err := c.RenameCollection(ctx, testCollectionName, newCollName) @@ -468,7 +454,6 @@ func (s *CollectionSuite) TestAlterCollection() { s.Run("normal_run", func() { defer s.resetMock() - s.setupHasCollection(testCollectionName) s.mock.EXPECT().AlterCollection(mock.Anything, mock.AnythingOfType("*milvuspb.AlterCollectionRequest")). Return(&commonpb.Status{}, nil) @@ -479,31 +464,15 @@ func (s *CollectionSuite) TestAlterCollection() { s.NoError(err) }) - s.Run("collection_not_exist", func() { - defer s.resetMock() - - s.mock.EXPECT().HasCollection(mock.Anything, mock.AnythingOfType("*milvuspb.HasCollectionRequest")). 
- Return(&milvuspb.BoolResponse{ - Status: &commonpb.Status{}, - Value: false, - }, nil) - - err := c.AlterCollection(ctx, testCollectionName, entity.CollectionTTL(100000)) - s.Error(err) - }) - s.Run("no_attributes", func() { defer s.resetMock() - s.setupHasCollection(testCollectionName) err := c.AlterCollection(ctx, testCollectionName) s.Error(err) }) s.Run("request_fails", func() { defer s.resetMock() - - s.setupHasCollection(testCollectionName) s.mock.EXPECT().AlterCollection(mock.Anything, mock.AnythingOfType("*milvuspb.AlterCollectionRequest")). Return(nil, errors.New("mocked")) @@ -514,7 +483,6 @@ func (s *CollectionSuite) TestAlterCollection() { s.Run("server_return_error", func() { defer s.resetMock() - s.setupHasCollection(testCollectionName) s.mock.EXPECT().AlterCollection(mock.Anything, mock.AnythingOfType("*milvuspb.AlterCollectionRequest")). Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError}, nil) @@ -537,8 +505,6 @@ func (s *CollectionSuite) TestLoadCollection() { s.Run("normal_run_async", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil) @@ -548,8 +514,6 @@ func (s *CollectionSuite) TestLoadCollection() { s.Run("normal_run_sync", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil) s.mock.EXPECT().GetLoadingProgress(mock.Anything, mock.Anything). @@ -564,8 +528,6 @@ func (s *CollectionSuite) TestLoadCollection() { s.Run("load_default_replica", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything).Run(func(_ context.Context, req *milvuspb.LoadCollectionRequest) { s.Equal(int32(0), req.GetReplicaNumber()) @@ -578,8 +540,6 @@ func (s *CollectionSuite) TestLoadCollection() { s.Run("load_multiple_replica", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything).Run(func(_ context.Context, req *milvuspb.LoadCollectionRequest) { s.Equal(testMultiReplicaNumber, req.GetReplicaNumber()) @@ -590,31 +550,9 @@ func (s *CollectionSuite) TestLoadCollection() { s.NoError(err) }) - s.Run("has_collection_failure", func() { - s.Run("return_false", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: false}, nil) - - err := c.LoadCollection(ctx, testCollectionName, true) - s.Error(err) - }) - - s.Run("return_error", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). 
- Return(nil, errors.New("mock error")) - - err := c.LoadCollection(ctx, testCollectionName, true) - s.Error(err) - }) - }) - s.Run("load_collection_failure", func() { s.Run("failure_status", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything). Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError}, nil) @@ -624,9 +562,6 @@ func (s *CollectionSuite) TestLoadCollection() { }) s.Run("return_error", func() { - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) - s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything). Return(nil, errors.New("mock error")) @@ -637,8 +572,6 @@ func (s *CollectionSuite) TestLoadCollection() { s.Run("get_loading_progress_failure", func() { defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) s.mock.EXPECT().LoadCollection(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil) s.mock.EXPECT().GetLoadingProgress(mock.Anything, mock.Anything). @@ -655,6 +588,188 @@ func (s *CollectionSuite) TestLoadCollection() { }) } +func (s *CollectionSuite) TestDropCollection() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + + s.Run("normal_run", func() { + defer s.resetMock() + + s.mock.EXPECT().DropCollection(mock.Anything, mock.AnythingOfType("*milvuspb.DropCollectionRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.DropCollectionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return s.getSuccessStatus(), nil + }).Once() + + err := c.DropCollection(ctx, collectionName) + s.NoError(err) + }) + + s.Run("return_error", func() { + defer s.resetMock() + + s.mock.EXPECT().DropCollection(mock.Anything, mock.AnythingOfType("*milvuspb.DropCollectionRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.DropCollectionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return nil, errors.New("mock") + }).Once() + + err := c.DropCollection(ctx, collectionName) + s.Error(err) + }) +} + +func (s *CollectionSuite) TestGetCollectionStatistics() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + stat := make(map[string]string) + stat["row_count"] = fmt.Sprintf("%d", rand.Int31n(10000)) + + s.Run("normal_case", func() { + defer s.resetMock() + + s.mock.EXPECT().GetCollectionStatistics(mock.Anything, mock.AnythingOfType("*milvuspb.GetCollectionStatisticsRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return &milvuspb.GetCollectionStatisticsResponse{ + Status: s.getSuccessStatus(), + Stats: entity.MapKvPairs(stat), + }, nil + }).Once() + + result, err := c.GetCollectionStatistics(ctx, collectionName) + s.NoError(err) + s.Len(result, len(stat)) + for k, 
v := range result { + s.Equal(v, stat[k]) + } + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.mock.EXPECT().GetCollectionStatistics(mock.Anything, mock.AnythingOfType("*milvuspb.GetCollectionStatisticsRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return nil, errors.New("mock") + }).Once() + + _, err := c.GetCollectionStatistics(ctx, collectionName) + s.Error(err) + }) +} + +func (s *CollectionSuite) TestReleaseCollection() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + + s.Run("normal_run", func() { + defer s.resetMock() + + s.mock.EXPECT().ReleaseCollection(mock.Anything, mock.AnythingOfType("*milvuspb.ReleaseCollectionRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return s.getSuccessStatus(), nil + }).Once() + + err := c.ReleaseCollection(ctx, collectionName) + s.NoError(err) + }) + + s.Run("return_error", func() { + defer s.resetMock() + + s.mock.EXPECT().ReleaseCollection(mock.Anything, mock.AnythingOfType("*milvuspb.ReleaseCollectionRequest")).RunAndReturn(func(ctx context.Context, dcr *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dcr.GetCollectionName()) + return nil, errors.New("mock") + }).Once() + + err := c.ReleaseCollection(ctx, collectionName) + s.Error(err) + }) +} + +func (s *CollectionSuite) TestGetLoadingProgress() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + partName := fmt.Sprintf("part_%s", randStr(6)) + loadProgress := rand.Int63n(100) + + s.Run("normal_run", func() { + defer s.resetMock() + + s.mock.EXPECT().GetLoadingProgress(mock.Anything, mock.AnythingOfType("*milvuspb.GetLoadingProgressRequest")).RunAndReturn(func(ctx context.Context, glpr *milvuspb.GetLoadingProgressRequest) (*milvuspb.GetLoadingProgressResponse, error) { + s.Equal(collectionName, glpr.GetCollectionName()) + return &milvuspb.GetLoadingProgressResponse{ + Status: s.getSuccessStatus(), + Progress: loadProgress, + }, nil + }).Once() + + percent, err := c.GetLoadingProgress(ctx, collectionName, []string{partName}) + s.NoError(err) + s.Equal(loadProgress, percent) + }) + + s.Run("return_error", func() { + defer s.resetMock() + + s.mock.EXPECT().GetLoadingProgress(mock.Anything, mock.AnythingOfType("*milvuspb.GetLoadingProgressRequest")).RunAndReturn(func(ctx context.Context, glpr *milvuspb.GetLoadingProgressRequest) (*milvuspb.GetLoadingProgressResponse, error) { + s.Equal(collectionName, glpr.GetCollectionName()) + return nil, errors.New("mock") + }).Once() + + _, err := c.GetLoadingProgress(ctx, collectionName, nil) + s.Error(err) + }) +} + +func (s *CollectionSuite) TestGetLoadState() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + partName := fmt.Sprintf("part_%s", randStr(6)) + loadState := commonpb.LoadState(rand.Int31n(4)) + + s.Run("normal_run", func() { + defer s.resetMock() + + s.mock.EXPECT().GetLoadState(mock.Anything, mock.AnythingOfType("*milvuspb.GetLoadStateRequest")).RunAndReturn(func(ctx context.Context, glsr 
*milvuspb.GetLoadStateRequest) (*milvuspb.GetLoadStateResponse, error) { + s.Equal(collectionName, glsr.GetCollectionName()) + s.ElementsMatch([]string{partName}, glsr.GetPartitionNames()) + return &milvuspb.GetLoadStateResponse{ + Status: s.getSuccessStatus(), + State: loadState, + }, nil + }).Once() + + state, err := c.GetLoadState(ctx, collectionName, []string{partName}) + s.NoError(err) + s.EqualValues(loadState, state) + }) + + s.Run("return_error", func() { + defer s.resetMock() + + s.mock.EXPECT().GetLoadState(mock.Anything, mock.AnythingOfType("*milvuspb.GetLoadStateRequest")).RunAndReturn(func(ctx context.Context, glsr *milvuspb.GetLoadStateRequest) (*milvuspb.GetLoadStateResponse, error) { + s.Equal(collectionName, glsr.GetCollectionName()) + return nil, errors.New("mock") + }).Once() + + _, err := c.GetLoadState(ctx, collectionName, nil) + s.Error(err) + }) +} + func TestCollectionSuite(t *testing.T) { suite.Run(t, new(CollectionSuite)) } @@ -674,48 +789,6 @@ var hasCollectionDefault = func(_ context.Context, raw proto.Message) (proto.Mes return resp, err } -func TestGrpcClientDropCollection(t *testing.T) { - ctx := context.Background() - c := testClient(ctx, t) - - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - mockServer.SetInjection(MDropCollection, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := (raw).(*milvuspb.DropCollectionRequest) - if !ok { - return BadRequestStatus() - } - if req.GetCollectionName() != testCollectionName { // in mockServer.server, assume testCollection exists only - return BadRequestStatus() - } - return SuccessStatus() - }) - - t.Run("Test Normal drop", func(t *testing.T) { - assert.Nil(t, c.DropCollection(ctx, testCollectionName, WithDropCollectionMsgBase(&commonpb.MsgBase{}))) - }) - - t.Run("Test drop non-existing collection", func(t *testing.T) { - assert.NotNil(t, c.DropCollection(ctx, "AAAAAAAAAANonExists")) - }) -} - -func TestReleaseCollection(t *testing.T) { - ctx := context.Background() - - c := testClient(ctx, t) - - mockServer.SetInjection(MReleaseCollection, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.ReleaseCollectionRequest) - if !ok { - return BadRequestStatus() - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - return SuccessStatus() - }) - - c.ReleaseCollection(ctx, testCollectionName, WithReleaseCollectionMsgBase(&commonpb.MsgBase{})) -} - func TestGrpcClientHasCollection(t *testing.T) { ctx := context.Background() @@ -742,32 +815,6 @@ func TestGrpcClientHasCollection(t *testing.T) { assert.True(t, has) } -// return injection asserts collection name matchs -// partition name request in partitionNames if flag is true -func hasCollectionInjection(t *testing.T, mustIn bool, collNames ...string) func(context.Context, proto.Message) (proto.Message, error) { - return func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.HasCollectionRequest) - resp := &milvuspb.BoolResponse{} - if !ok { - s, err := BadRequestStatus() - resp.Status = s - return resp, err - } - if mustIn { - resp.Value = assert.Contains(t, collNames, req.GetCollectionName()) - } else { - for _, pn := range collNames { - if pn == req.GetCollectionName() { - resp.Value = true - } - } - } - s, err := SuccessStatus() - resp.Status = s - return resp, err - } -} - func describeCollectionInjection(t *testing.T, collID int64, collName string, sch *entity.Schema) func(_ context.Context, raw proto.Message) (proto.Message, 
error) { return func(_ context.Context, raw proto.Message) (proto.Message, error) { req, ok := raw.(*milvuspb.DescribeCollectionRequest) @@ -807,39 +854,6 @@ func TestGrpcClientDescribeCollection(t *testing.T) { } } -func TestGrpcClientGetCollectionStatistics(t *testing.T) { - ctx := context.Background() - - c := testClient(ctx, t) - - stat := make(map[string]string) - stat["row_count"] = "0" - - mockServer.SetInjection(MGetCollectionStatistics, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.GetCollectionStatisticsRequest) - resp := &milvuspb.GetCollectionStatisticsResponse{} - if !ok { - s, err := BadRequestStatus() - resp.Status = s - return resp, err - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - s, err := SuccessStatus() - resp.Status, resp.Stats = s, entity.MapKvPairs(stat) - return resp, err - }) - - rStat, err := c.GetCollectionStatistics(ctx, testCollectionName) - assert.Nil(t, err) - if assert.NotNil(t, rStat) { - for k, v := range stat { - rv, has := rStat[k] - assert.True(t, has) - assert.Equal(t, v, rv) - } - } -} - func TestGrpcClientGetReplicas(t *testing.T) { ctx := context.Background() c := testClient(ctx, t) @@ -902,11 +916,6 @@ func TestGrpcClientGetReplicas(t *testing.T) { assert.Equal(t, 2, len(groups[0].ShardReplicas)) }) - t.Run("get replicas invalid name", func(t *testing.T) { - _, err := c.GetReplicas(ctx, "invalid name") - assert.Error(t, err) - }) - t.Run("get replicas grpc error", func(t *testing.T) { mockServer.SetInjection(MGetReplicas, func(ctx context.Context, raw proto.Message) (proto.Message, error) { return &milvuspb.GetReplicasResponse{}, errors.New("mockServer.d grpc error") @@ -931,59 +940,3 @@ func TestGrpcClientGetReplicas(t *testing.T) { mockServer.DelInjection(MGetReplicas) } - -func TestGrpcClientGetLoadingProgress(t *testing.T) { - ctx := context.Background() - c := testClient(ctx, t) - - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - - mockServer.SetInjection(MGetLoadingProgress, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.GetLoadingProgressRequest) - if !ok { - return BadRequestStatus() - } - resp := &milvuspb.GetLoadingProgressResponse{} - if !ok { - s, err := BadRequestStatus() - resp.Status = s - return resp, err - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - s, err := SuccessStatus() - resp.Status, resp.Progress = s, 100 - return resp, err - }) - - progress, err := c.GetLoadingProgress(ctx, testCollectionName, []string{}) - assert.NoError(t, err) - assert.Equal(t, int64(100), progress) -} - -func TestGrpcClientGetLoadState(t *testing.T) { - ctx := context.Background() - c := testClient(ctx, t) - - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - - mockServer.SetInjection(MGetLoadState, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.GetLoadStateRequest) - if !ok { - return BadRequestStatus() - } - resp := &milvuspb.GetLoadStateResponse{} - if !ok { - s, err := BadRequestStatus() - resp.Status = s - return resp, err - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - s, err := SuccessStatus() - resp.Status, resp.State = s, commonpb.LoadState_LoadStateLoaded - return resp, err - }) - - state, err := c.GetLoadState(ctx, testCollectionName, []string{}) - assert.NoError(t, err) - assert.Equal(t, entity.LoadStateLoaded, state) -} diff --git a/client/data.go b/client/data.go index 5aa516d5..2232fe81 100644 --- 
a/client/data.go +++ b/client/data.go @@ -495,22 +495,6 @@ func (c *GrpcClient) CalcDistance(ctx context.Context, collName string, partitio return nil, errors.New("operators cannot be nil") } - // check meta - if err := c.checkCollectionExists(ctx, collName); err != nil { - return nil, err - } - for _, partition := range partitions { - if err := c.checkPartitionExists(ctx, collName, partition); err != nil { - return nil, err - } - } - if err := c.checkCollField(ctx, collName, opLeft.Name(), isVectorField); err != nil { - return nil, err - } - if err := c.checkCollField(ctx, collName, opRight.Name(), isVectorField); err != nil { - return nil, err - } - req := &milvuspb.CalcDistanceRequest{ OpLeft: columnToVectorsArray(collName, partitions, opLeft), OpRight: columnToVectorsArray(collName, partitions, opRight), diff --git a/client/data_test.go b/client/data_test.go index 46631b6f..bb6a0e53 100644 --- a/client/data_test.go +++ b/client/data_test.go @@ -100,151 +100,6 @@ func TestGrpcClientFlush(t *testing.T) { }) } -func TestGrpcDeleteByPks(t *testing.T) { - ctx := context.Background() - - c := testClient(ctx, t) - defer c.Close() - - mockServer.SetInjection(MDescribeCollection, describeCollectionInjection(t, 1, testCollectionName, defaultSchema())) - defer mockServer.DelInjection(MDescribeCollection) - - t.Run("normal delete by pks", func(t *testing.T) { - partName := "testPart" - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, true, partName)) - defer mockServer.DelInjection(MHasPartition) - mockServer.SetInjection(MDelete, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.DeleteRequest) - if !ok { - t.FailNow() - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - assert.Equal(t, partName, req.GetPartitionName()) - - resp := &milvuspb.MutationResult{} - s, err := SuccessStatus() - resp.Status = s - return resp, err - }) - defer mockServer.DelInjection(MDelete) - - err := c.DeleteByPks(ctx, testCollectionName, partName, entity.NewColumnInt64(testPrimaryField, []int64{1, 2, 3})) - assert.NoError(t, err) - }) - - t.Run("Bad request deletes", func(t *testing.T) { - partName := "testPart" - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, false, partName)) - defer mockServer.DelInjection(MHasPartition) - - // non-exist collection - err := c.DeleteByPks(ctx, "non-exists-collection", "", entity.NewColumnInt64("pk", []int64{})) - assert.Error(t, err) - - // non-exist parition - err = c.DeleteByPks(ctx, testCollectionName, "non-exists-part", entity.NewColumnInt64("pk", []int64{})) - assert.Error(t, err) - - // zero length pk - err = c.DeleteByPks(ctx, testCollectionName, "", entity.NewColumnInt64(testPrimaryField, []int64{})) - assert.Error(t, err) - - // string pk field - err = c.DeleteByPks(ctx, testCollectionName, "", entity.NewColumnString(testPrimaryField, []string{"1"})) - assert.Error(t, err) - - // pk name not match - err = c.DeleteByPks(ctx, testCollectionName, "", entity.NewColumnInt64("not_pk", []int64{1})) - assert.Error(t, err) - }) - - t.Run("delete services fail", func(t *testing.T) { - mockServer.SetInjection(MDelete, func(_ context.Context, raw proto.Message) (proto.Message, error) { - resp := &milvuspb.MutationResult{} - return resp, errors.New("mockServer.d error") - }) - - err := c.DeleteByPks(ctx, testCollectionName, "", entity.NewColumnInt64(testPrimaryField, []int64{1})) - assert.Error(t, err) - - mockServer.SetInjection(MDelete, func(_ 
context.Context, raw proto.Message) (proto.Message, error) { - resp := &milvuspb.MutationResult{} - resp.Status = &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - } - return resp, nil - }) - err = c.DeleteByPks(ctx, testCollectionName, "", entity.NewColumnInt64(testPrimaryField, []int64{1})) - assert.Error(t, err) - }) -} - -func TestGrpcDelete(t *testing.T) { - ctx := context.Background() - - c := testClient(ctx, t) - defer c.Close() - - mockServer.SetInjection(MDescribeCollection, describeCollectionInjection(t, 1, testCollectionName, defaultSchema())) - defer mockServer.DelInjection(MDescribeCollection) - - t.Run("normal delete by pks", func(t *testing.T) { - partName := "testPart" - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, true, partName)) - defer mockServer.DelInjection(MHasPartition) - mockServer.SetInjection(MDelete, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.DeleteRequest) - if !ok { - t.FailNow() - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - assert.Equal(t, partName, req.GetPartitionName()) - - resp := &milvuspb.MutationResult{} - s, err := SuccessStatus() - resp.Status = s - return resp, err - }) - defer mockServer.DelInjection(MDelete) - - err := c.Delete(ctx, testCollectionName, partName, "") - assert.NoError(t, err) - }) - - t.Run("Bad request deletes", func(t *testing.T) { - partName := "testPart" - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, false, partName)) - defer mockServer.DelInjection(MHasPartition) - - // non-exist collection - err := c.Delete(ctx, "non-exists-collection", "", "") - assert.Error(t, err) - - // non-exist parition - err = c.Delete(ctx, testCollectionName, "non-exists-part", "") - assert.Error(t, err) - }) - t.Run("delete services fail", func(t *testing.T) { - mockServer.SetInjection(MDelete, func(_ context.Context, raw proto.Message) (proto.Message, error) { - resp := &milvuspb.MutationResult{} - return resp, errors.New("mockServer.d error") - }) - - err := c.Delete(ctx, testCollectionName, "", "") - assert.Error(t, err) - - mockServer.SetInjection(MDelete, func(_ context.Context, raw proto.Message) (proto.Message, error) { - resp := &milvuspb.MutationResult{} - resp.Status = &commonpb.Status{ - ErrorCode: commonpb.ErrorCode_UnexpectedError, - } - return resp, nil - }) - err = c.Delete(ctx, testCollectionName, "", "") - assert.Error(t, err) - }) -} - type SearchSuite struct { MockSuiteBase sch *entity.Schema @@ -634,7 +489,7 @@ func (s *QuerySuite) TestQueryFail() { FieldsData: []*schemapb.FieldData{ { FieldName: "ID", - Type: schemapb.DataType_String, //wrong data type here + Type: schemapb.DataType_String, // wrong data type here Field: &schemapb.FieldData_Scalars{ Scalars: &schemapb.ScalarField{ Data: &schemapb.ScalarField_LongData{ @@ -794,7 +649,6 @@ func TestGrpcCalcDistanceWithIDs(t *testing.T) { entity.NewColumnInt64("non-exists", []int64{1}), entity.NewColumnInt64("int64", []int64{1})) assert.Nil(t, r) assert.NotNil(t, err) - }) t.Run("valid calls", func(t *testing.T) { @@ -992,7 +846,6 @@ func TestIsCollectionPrimaryKey(t *testing.T) { assert.True(t, isCollectionPrimaryKey(&entity.Collection{ Schema: defaultSchema(), }, entity.NewColumnInt64("int64", []int64{}))) - }) } diff --git a/client/insert.go b/client/insert.go index 689cd546..4739ecc9 100644 --- a/client/insert.go +++ b/client/insert.go @@ -227,9 +227,6 @@ func (c *GrpcClient) FlushV2(ctx context.Context, 
collName string, async bool, o if c.Service == nil { return nil, nil, 0, nil, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return nil, nil, 0, nil, err - } req := &milvuspb.FlushRequest{ DbName: "", // reserved, CollectionNames: []string{collName}, @@ -290,21 +287,6 @@ func (c *GrpcClient) DeleteByPks(ctx context.Context, collName string, partition return ErrClientNotReady } - // check collection name - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - coll, err := c.DescribeCollection(ctx, collName) - if err != nil { - return err - } - // check partition name - if partitionName != "" { - err := c.checkPartitionExists(ctx, collName, partitionName) - if err != nil { - return err - } - } // check primary keys if ids.Len() == 0 { return errors.New("ids len must not be zero") @@ -313,6 +295,11 @@ func (c *GrpcClient) DeleteByPks(ctx context.Context, collName string, partition return errors.New("only int64 and varchar column can be primary key for now") } + coll, err := c.DescribeCollection(ctx, collName) + if err != nil { + return err + } + pkf := getPKField(coll.Schema) // pkf shall not be nil since is returned from milvus if ids.Name() != "" && pkf.Name != ids.Name() { @@ -346,19 +333,6 @@ func (c *GrpcClient) Delete(ctx context.Context, collName string, partitionName return ErrClientNotReady } - // check collection name - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - - // check partition name - if partitionName != "" { - err := c.checkPartitionExists(ctx, collName, partitionName) - if err != nil { - return err - } - } - req := &milvuspb.DeleteRequest{ DbName: "", CollectionName: collName, diff --git a/client/insert_test.go b/client/insert_test.go index c6405148..65471cb7 100644 --- a/client/insert_test.go +++ b/client/insert_test.go @@ -13,6 +13,8 @@ package client import ( "context" + "fmt" + "math/rand" "testing" "github.com/cockroachdb/errors" @@ -804,3 +806,120 @@ func TestWrite(t *testing.T) { suite.Run(t, new(InsertSuite)) suite.Run(t, new(UpsertSuite)) } + +type DeleteSuite struct { + MockSuiteBase +} + +func (s *DeleteSuite) TestDeleteByPks() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + partitionName := fmt.Sprintf("part_%s", randStr(6)) + + s.Run("normal_case", func() { + defer s.resetMock() + + s.setupDescribeCollection(collectionName, entity.NewSchema(). + WithField(entity.NewField().WithIsPrimaryKey(true).WithIsAutoID(true).WithName("ID").WithDataType(entity.FieldTypeInt64)). + WithField(entity.NewField().WithName("vector").WithDataType(entity.FieldTypeFloatVector).WithTypeParams(entity.TypeParamDim, "128")), + ) + s.mock.EXPECT().Delete(mock.Anything, mock.AnythingOfType("*milvuspb.DeleteRequest")).RunAndReturn(func(ctx context.Context, dr *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) { + s.Equal(collectionName, dr.GetCollectionName()) + s.Equal(partitionName, dr.GetPartitionName()) + return &milvuspb.MutationResult{ + Status: s.getSuccessStatus(), + }, nil + }).Once() + err := c.DeleteByPks(ctx, collectionName, partitionName, entity.NewColumnInt64("ID", []int64{1, 2, 3})) + s.NoError(err) + }) + + s.Run("bad_requests", func() { + defer s.resetMock() + + s.setupDescribeCollection(collectionName, entity.NewSchema(). + WithField(entity.NewField().WithIsPrimaryKey(true).WithIsAutoID(true).WithName("ID").WithDataType(entity.FieldTypeInt64)). 
+ WithField(entity.NewField().WithName("vector").WithDataType(entity.FieldTypeFloatVector).WithTypeParams(entity.TypeParamDim, "128")), + ) + + s.Run("zero_length_pks", func() { + err := c.DeleteByPks(ctx, collectionName, "", entity.NewColumnInt64("ID", []int64{})) + s.Error(err) + }) + + s.Run("pk_type_not_valid", func() { + err := c.DeleteByPks(ctx, collectionName, "", entity.NewColumnBool("ID", []bool{true, false})) + s.Error(err) + }) + + s.Run("pk_name_not_match", func() { + err := c.DeleteByPks(ctx, collectionName, "", entity.NewColumnInt64("pk_", []int64{100, 200})) + s.Error(err) + }) + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.setupDescribeCollection(collectionName, entity.NewSchema(). + WithField(entity.NewField().WithIsPrimaryKey(true).WithIsAutoID(true).WithName("ID").WithDataType(entity.FieldTypeInt64)). + WithField(entity.NewField().WithName("vector").WithDataType(entity.FieldTypeFloatVector).WithTypeParams(entity.TypeParamDim, "128")), + ) + s.mock.EXPECT().Delete(mock.Anything, mock.AnythingOfType("")).RunAndReturn(func(ctx context.Context, dr *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) { + s.Equal(collectionName, dr.GetCollectionName()) + s.Equal(partitionName, dr.GetPartitionName()) + return nil, errors.New("mocked") + }).Once() + err := c.DeleteByPks(ctx, collectionName, partitionName, entity.NewColumnInt64("ID", []int64{1, 2, 3})) + s.Error(err) + }) +} + +func (s *DeleteSuite) TestDelete() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + partitionName := fmt.Sprintf("part_%s", randStr(6)) + + s.Run("normal_case", func() { + defer s.resetMock() + expr := fmt.Sprintf("tag in [%d, %d, %d]", rand.Int31n(10), rand.Int31n(20), rand.Int31n(30)) + + s.mock.EXPECT().Delete(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, dr *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) { + s.Equal(collectionName, dr.GetCollectionName()) + s.Equal(partitionName, dr.GetPartitionName()) + s.Equal(expr, dr.GetExpr()) + return &milvuspb.MutationResult{ + Status: s.getSuccessStatus(), + }, nil + }).Once() + + err := c.Delete(ctx, collectionName, partitionName, expr) + s.NoError(err) + }) + + s.Run("server_error", func() { + expr := "tag > 50" + defer s.resetMock() + s.mock.EXPECT().Delete(mock.Anything, mock.Anything).RunAndReturn(func(ctx context.Context, dr *milvuspb.DeleteRequest) (*milvuspb.MutationResult, error) { + s.Equal(collectionName, dr.GetCollectionName()) + s.Equal(partitionName, dr.GetPartitionName()) + s.Equal(expr, dr.GetExpr()) + return &milvuspb.MutationResult{ + Status: s.getStatus(commonpb.ErrorCode_UnexpectedError, "mocked"), + }, nil + }).Once() + + err := c.Delete(ctx, collectionName, partitionName, expr) + s.Error(err) + }) +} + +func TestDelete(t *testing.T) { + suite.Run(t, new(DeleteSuite)) +} diff --git a/client/maintainance.go b/client/maintainance.go index 532d8718..1f9ea0ef 100644 --- a/client/maintainance.go +++ b/client/maintainance.go @@ -30,9 +30,6 @@ func (c *GrpcClient) ManualCompaction(ctx context.Context, collName string, _ ti return 0, ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return 0, err - } coll, err := c.DescribeCollection(ctx, collName) if err != nil { return 0, err diff --git a/client/partition.go b/client/partition.go index d0f95ac2..30126ecd 100644 --- a/client/partition.go +++ b/client/partition.go @@ -13,7 +13,6 @@ package client 
import ( "context" - "fmt" "time" "github.com/cockroachdb/errors" @@ -28,19 +27,9 @@ func (c *GrpcClient) CreatePartition(ctx context.Context, collName string, parti if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - has, err := c.HasPartition(ctx, collName, partitionName) - if err != nil { - return err - } - if has { - return fmt.Errorf("partition %s of collection %s already exists", partitionName, collName) - } req := &milvuspb.CreatePartitionRequest{ - DbName: "", //reserved + DbName: "", // reserved CollectionName: collName, PartitionName: partitionName, } @@ -54,28 +43,12 @@ func (c *GrpcClient) CreatePartition(ctx context.Context, collName string, parti return handleRespStatus(resp) } -func (c *GrpcClient) checkPartitionExists(ctx context.Context, collName string, partitionName string) error { - has, err := c.HasPartition(ctx, collName, partitionName) - if err != nil { - return err - } - if !has { - return partNotExistsErr(collName, partitionName) - } - return nil -} - // DropPartition drop partition from collection func (c *GrpcClient) DropPartition(ctx context.Context, collName string, partitionName string, opts ...DropPartitionOption) error { if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - if err := c.checkPartitionExists(ctx, collName, partitionName); err != nil { - return err - } + req := &milvuspb.DropPartitionRequest{ DbName: "", CollectionName: collName, @@ -147,14 +120,6 @@ func (c *GrpcClient) LoadPartitions(ctx context.Context, collName string, partit if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - for _, partitionName := range partitionNames { - if err := c.checkPartitionExists(ctx, collName, partitionName); err != nil { - return err - } - } req := &milvuspb.LoadPartitionsRequest{ DbName: "", // reserved @@ -200,14 +165,6 @@ func (c *GrpcClient) ReleasePartitions(ctx context.Context, collName string, par if c.Service == nil { return ErrClientNotReady } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return err - } - for _, partitionName := range partitionNames { - if err := c.checkPartitionExists(ctx, collName, partitionName); err != nil { - return err - } - } req := &milvuspb.ReleasePartitionsRequest{ DbName: "", // reserved CollectionName: collName, diff --git a/client/partition_test.go b/client/partition_test.go index 588eb427..9e217d97 100644 --- a/client/partition_test.go +++ b/client/partition_test.go @@ -44,42 +44,6 @@ func hasPartitionInjection(t *testing.T, collName string, mustIn bool, partition } } -func TestGrpcClientCreatePartition(t *testing.T) { - - ctx := context.Background() - c := testClient(ctx, t) - - partitionName := fmt.Sprintf("_part_%d", rand.Int()) - - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - mockServer.SetInjection(MHasPartition, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.HasPartitionRequest) - resp := &milvuspb.BoolResponse{} - if !ok { - s, err := BadRequestStatus() - resp.Status = s - return resp, err - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - assert.Equal(t, partitionName, req.GetPartitionName()) - resp.Value = false - s, err := SuccessStatus() - resp.Status = s - return resp, err - }) - - assert.Nil(t, c.CreatePartition(ctx, testCollectionName, partitionName, 
WithCreatePartitionMsgBase(&commonpb.MsgBase{}))) -} - -func TestGrpcClientDropPartition(t *testing.T) { - partitionName := fmt.Sprintf("_part_%d", rand.Int()) - ctx := context.Background() - c := testClient(ctx, t) - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, true, partitionName)) // injection has assertion of collName & parition name - assert.Nil(t, c.DropPartition(ctx, testCollectionName, partitionName, WithDropPartitionMsgBase(&commonpb.MsgBase{}))) -} - func TestGrpcClientHasPartition(t *testing.T) { partitionName := fmt.Sprintf("_part_%d", rand.Int()) ctx := context.Background() @@ -121,7 +85,6 @@ func getPartitionsInterception(t *testing.T, collName string, partitions ...*ent } func TestGrpcClientShowPartitions(t *testing.T) { - ctx := context.Background() c := testClient(ctx, t) @@ -168,29 +131,6 @@ func TestGrpcClientShowPartitions(t *testing.T) { } } -func TestGrpcClientReleasePartitions(t *testing.T) { - ctx := context.Background() - - c := testClient(ctx, t) - - parts := []string{"_part1", "_part2"} - mockServer.SetInjection(MHasCollection, hasCollectionDefault) - mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, true, "_part1", "_part2", "_part3", "_part4")) - mockServer.SetInjection(MReleasePartitions, func(_ context.Context, raw proto.Message) (proto.Message, error) { - req, ok := raw.(*milvuspb.ReleasePartitionsRequest) - if !ok { - return BadRequestStatus() - } - assert.Equal(t, testCollectionName, req.GetCollectionName()) - assert.ElementsMatch(t, parts, req.GetPartitionNames()) - - return SuccessStatus() - }) - defer mockServer.SetInjection(MHasPartition, hasPartitionInjection(t, testCollectionName, false, "testPart")) - - assert.Nil(t, c.ReleasePartitions(ctx, testCollectionName, parts, WithReleasePartitionsMsgBase(&commonpb.MsgBase{}))) -} - func TestGrpcShowPartitions(t *testing.T) { ctx := context.Background() c := testClient(ctx, t) @@ -255,6 +195,89 @@ type PartitionSuite struct { MockSuiteBase } +func (s *PartitionSuite) TestCreatePartition() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + partitionName := fmt.Sprintf("part_%s", randStr(6)) + + s.Run("normal_case", func() { + defer s.resetMock() + + s.mock.EXPECT().CreatePartition(mock.Anything, mock.AnythingOfType("*milvuspb.CreatePartitionRequest")).RunAndReturn(func(ctx context.Context, cpr *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, cpr.GetCollectionName()) + s.Equal(partitionName, cpr.GetPartitionName()) + return s.getSuccessStatus(), nil + }).Once() + + err := c.CreatePartition(ctx, collectionName, partitionName) + s.NoError(err) + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.mock.EXPECT().CreatePartition(mock.Anything, mock.AnythingOfType("*milvuspb.CreatePartitionRequest")).RunAndReturn(func(ctx context.Context, cpr *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, cpr.GetCollectionName()) + s.Equal(partitionName, cpr.GetPartitionName()) + return nil, errors.New("mocked") + }).Once() + + err := c.CreatePartition(ctx, collectionName, partitionName) + s.Error(err) + }) +} + +func (s *PartitionSuite) TestDropPartition() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%s", randStr(6)) + 
partitionName := fmt.Sprintf("part_%s", randStr(6)) + + s.Run("normal_case", func() { + defer s.resetMock() + + s.mock.EXPECT().DropPartition(mock.Anything, mock.AnythingOfType("*milvuspb.DropPartitionRequest")).RunAndReturn(func(ctx context.Context, dpr *milvuspb.DropPartitionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dpr.GetCollectionName()) + s.Equal(partitionName, dpr.GetPartitionName()) + return s.getSuccessStatus(), nil + }).Once() + + err := c.DropPartition(ctx, collectionName, partitionName) + s.NoError(err) + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.mock.EXPECT().DropPartition(mock.Anything, mock.AnythingOfType("*milvuspb.DropPartitionRequest")).RunAndReturn(func(ctx context.Context, dpr *milvuspb.DropPartitionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dpr.GetCollectionName()) + s.Equal(partitionName, dpr.GetPartitionName()) + return nil, errors.New("mocked") + }).Once() + + err := c.DropPartition(ctx, collectionName, partitionName) + s.Error(err) + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.mock.EXPECT().DropPartition(mock.Anything, mock.AnythingOfType("*milvuspb.DropPartitionRequest")).RunAndReturn(func(ctx context.Context, dpr *milvuspb.DropPartitionRequest) (*commonpb.Status, error) { + s.Equal(collectionName, dpr.GetCollectionName()) + s.Equal(partitionName, dpr.GetPartitionName()) + return s.getStatus(commonpb.ErrorCode_UnexpectedError, ""), nil + }).Once() + + err := c.DropPartition(ctx, collectionName, partitionName) + s.Error(err) + }) +} + func (s *PartitionSuite) TestLoadPartitions() { c := s.client ctx, cancel := context.WithCancel(context.Background()) @@ -301,50 +324,6 @@ func (s *PartitionSuite) TestLoadPartitions() { s.NoError(err) }) - s.Run("has_collection_failure", func() { - s.Run("return_false", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: false}, nil) - - err := c.LoadPartitions(ctx, testCollectionName, partNames, false) - s.Error(err) - }) - - s.Run("return_error", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(nil, errors.New("mock error")) - - err := c.LoadPartitions(ctx, testCollectionName, partNames, false) - s.Error(err) - }) - }) - - s.Run("has_partition_failure", func() { - s.Run("return_false", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) - s.mock.EXPECT().HasPartition(mock.Anything, mock.Anything). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: false}, nil) - - err := c.LoadPartitions(ctx, testCollectionName, partNames, false) - s.Error(err) - }) - - s.Run("return_error", func() { - defer s.resetMock() - s.mock.EXPECT().HasCollection(mock.Anything, &milvuspb.HasCollectionRequest{CollectionName: testCollectionName}). - Return(&milvuspb.BoolResponse{Status: &commonpb.Status{}, Value: true}, nil) - s.mock.EXPECT().HasPartition(mock.Anything, mock.Anything). 
- Return(nil, errors.New("mock")) - - err := c.LoadPartitions(ctx, testCollectionName, partNames, false) - s.Error(err) - }) - }) - s.Run("load_partitions_failure", func() { s.Run("fail_status_code", func() { defer s.resetMock() @@ -410,6 +389,41 @@ func (s *PartitionSuite) TestLoadPartitions() { }) } +func (s *PartitionSuite) TestReleasePartitions() { + c := s.client + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + collectionName := fmt.Sprintf("coll_%d", rand.Int31n(100)) + partNames := []string{"part_1", "part_2"} + + s.Run("normal_case", func() { + defer s.resetMock() + + s.mock.EXPECT().ReleasePartitions(mock.Anything, mock.AnythingOfType("")).RunAndReturn(func(ctx context.Context, rpr *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) { + s.Equal(collectionName, rpr.GetCollectionName()) + s.Equal(partNames, rpr.GetPartitionNames()) + return s.getSuccessStatus(), nil + }).Once() + + err := c.ReleasePartitions(ctx, collectionName, partNames) + s.NoError(err) + }) + + s.Run("server_error", func() { + defer s.resetMock() + + s.mock.EXPECT().ReleasePartitions(mock.Anything, mock.AnythingOfType("")).RunAndReturn(func(ctx context.Context, rpr *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) { + s.Equal(collectionName, rpr.GetCollectionName()) + s.Equal(partNames, rpr.GetPartitionNames()) + return nil, errors.New("mocked") + }).Once() + + err := c.ReleasePartitions(ctx, collectionName, partNames) + s.Error(err) + }) +} + func TestPartitionSuite(t *testing.T) { suite.Run(t, new(PartitionSuite)) } diff --git a/client/row.go b/client/row.go index 6e36ab8d..12014f4b 100644 --- a/client/row.go +++ b/client/row.go @@ -60,7 +60,8 @@ func (c *GrpcClient) CreateCollectionByRow(ctx context.Context, row entity.Row, // InsertByRows insert by rows func (c *GrpcClient) InsertByRows(ctx context.Context, collName string, partitionName string, - rows []entity.Row) (entity.Column, error) { + rows []entity.Row, +) (entity.Column, error) { anys := make([]interface{}, 0, len(rows)) for _, row := range rows { anys = append(anys, row) @@ -72,7 +73,8 @@ func (c *GrpcClient) InsertByRows(ctx context.Context, collName string, partitio // InsertRows allows insert with row based data // rows could be struct or map. func (c *GrpcClient) InsertRows(ctx context.Context, collName string, partitionName string, - rows []interface{}) (entity.Column, error) { + rows []interface{}, +) (entity.Column, error) { if c.Service == nil { return nil, ErrClientNotReady } @@ -80,14 +82,6 @@ func (c *GrpcClient) InsertRows(ctx context.Context, collName string, partitionN return nil, errors.New("empty rows provided") } - if err := c.checkCollectionExists(ctx, collName); err != nil { - return nil, err - } - if partitionName != "" { - if err := c.checkPartitionExists(ctx, collName, partitionName); err != nil { - return nil, err - } - } coll, err := c.DescribeCollection(ctx, collName) if err != nil { return nil, err @@ -97,7 +91,7 @@ func (c *GrpcClient) InsertRows(ctx context.Context, collName string, partitionN if err != nil { return nil, err } - //fieldData + // fieldData // 2. 
do insert request req := &milvuspb.InsertRequest{ DbName: "", // reserved @@ -211,10 +205,8 @@ func SearchResultToRows(sch *entity.Schema, results *schemapb.SearchResultData, return sr, nil } -var ( - // ErrFieldTypeNotMatch error for field type not match - ErrFieldTypeNotMatch = errors.New("field type not matched") -) +// ErrFieldTypeNotMatch error for field type not match +var ErrFieldTypeNotMatch = errors.New("field type not matched") // SetFieldValue set row field value with reflection func SetFieldValue(field *entity.Field, f reflect.Value, fieldData *schemapb.FieldData, idx int) error { @@ -329,7 +321,6 @@ func SetFieldValue(field *entity.Field, f reflect.Value, fieldData *schemapb.Fie } data := vectors.GetFloatVector() if data == nil { - return ErrFieldTypeNotMatch } vector := data.Data[idx*int(vectors.Dim) : (idx+1)*int(vectors.Dim)] diff --git a/client/row_test.go b/client/row_test.go index 4708c825..b9588865 100644 --- a/client/row_test.go +++ b/client/row_test.go @@ -53,7 +53,7 @@ func TestCreateCollectionByRow(t *testing.T) { }) t.Run("Invalid cases", func(t *testing.T) { - //Duplicated + // Duplicated m := make(map[string]struct{}) mockServer.SetInjection(MCreateCollection, func(_ context.Context, raw proto.Message) (proto.Message, error) { req, ok := raw.(*milvuspb.CreateCollectionRequest) @@ -106,51 +106,6 @@ func (s *InsertByRowsSuite) TestFails() { s.Error(err) }) - s.Run("fail_collection_not_found", func() { - defer s.resetMock() - s.setupHasCollection() - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - - s.Run("fail_hascollection_errcode", func() { - defer s.resetMock() - s.setupHasCollectionError(commonpb.ErrorCode_UnexpectedError, nil) - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - - s.Run("fail_hascollection_error", func() { - defer s.resetMock() - s.setupHasCollectionError(commonpb.ErrorCode_Success, errors.New("mock error")) - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - - s.Run("fail_partition_not_found", func() { - defer s.resetMock() - s.setupHasCollection(testCollectionName) - s.setupHasPartition(testCollectionName) - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - - s.Run("fail_haspartition_error", func() { - defer s.resetMock() - s.setupHasCollection(testCollectionName) - s.setupHasPartitionError(commonpb.ErrorCode_Success, errors.New("mock error")) - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - - s.Run("fail_haspartition_errcode", func() { - defer s.resetMock() - s.setupHasCollection(testCollectionName) - s.setupHasPartitionError(commonpb.ErrorCode_UnexpectedError, nil) - _, err := c.InsertByRows(ctx, testCollectionName, partName, []entity.Row{entity.RowBase{}}) - s.Error(err) - }) - s.Run("fail_describecollection_error", func() { defer s.resetMock() s.setupHasCollection(testCollectionName) @@ -290,8 +245,6 @@ func (s *InsertByRowsSuite) TestSuccess() { s.Run("non_dynamic", func() { defer s.resetMock() - s.setupHasCollection(testCollectionName) - s.setupHasPartition(testCollectionName, partName) s.setupDescribeCollection(testCollectionName, entity.NewSchema().WithName(testCollectionName). WithField(entity.NewField().WithName("ID").WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)). 
WithField(entity.NewField().WithName("Vector").WithDataType(entity.FieldTypeFloatVector).WithTypeParams(entity.TypeParamDim, "128")), @@ -320,8 +273,6 @@ func (s *InsertByRowsSuite) TestSuccess() { s.Run("dynamic", func() { defer s.resetMock() - s.setupHasCollection(testCollectionName) - s.setupHasPartition(testCollectionName, partName) s.setupDescribeCollection(testCollectionName, entity.NewSchema(). WithName(testCollectionName).WithDynamicFieldEnabled(true). WithField(entity.NewField().WithName("ID").WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)). @@ -534,7 +485,7 @@ func TestSetFieldValue(t *testing.T) { f64 := reflect.ValueOf(item).Elem().FieldByName("Double") str := reflect.ValueOf(item).Elem().FieldByName("String") vf := reflect.ValueOf(item).Elem().FieldByName("Arr") - //vb := reflect.ValueOf(item).Elem().FieldByName("ArrBin") + // vb := reflect.ValueOf(item).Elem().FieldByName("ArrBin") err = SetFieldValue(&entity.Field{ DataType: entity.FieldTypeNone, @@ -627,6 +578,7 @@ func TestSetFieldValue(t *testing.T) { func emptyFieldData() *schemapb.FieldData { return &schemapb.FieldData{} } + func emptyScalarFieldData() *schemapb.FieldData { return &schemapb.FieldData{ Field: &schemapb.FieldData_Scalars{ diff --git a/test/testcases/collection_test.go b/test/testcases/collection_test.go index 7111055c..4311f56f 100644 --- a/test/testcases/collection_test.go +++ b/test/testcases/collection_test.go @@ -159,12 +159,16 @@ func TestCreateCollectionInvalidFields(t *testing.T) { } invalidFields := []invalidFieldsStruct{ // create collection without pk field - {fields: []*entity.Field{common.GenField(common.DefaultFloatVecFieldName, entity.FieldTypeFloatVector, common.WithDim(common.DefaultDim))}, - errMsg: "primary key is not specified"}, + { + fields: []*entity.Field{common.GenField(common.DefaultFloatVecFieldName, entity.FieldTypeFloatVector, common.WithDim(common.DefaultDim))}, + errMsg: "primary key is not specified", + }, // create collection without vector field - {fields: []*entity.Field{common.GenField(common.DefaultIntFieldName, entity.FieldTypeInt64, common.WithIsPrimaryKey(true))}, - errMsg: "vector field not set"}, + { + fields: []*entity.Field{common.GenField(common.DefaultIntFieldName, entity.FieldTypeInt64, common.WithIsPrimaryKey(true))}, + errMsg: "vector field not set", + }, // create collection with multi pk fields {fields: []*entity.Field{ @@ -354,7 +358,7 @@ func TestCreateCollectionDescription(t *testing.T) { pkField := common.GenField(common.DefaultIntFieldName, entity.FieldTypeInt64, common.WithIsPrimaryKey(true), common.WithFieldDescription("pk field")) vecField := common.GenField("", entity.FieldTypeFloatVector, common.WithDim(common.DefaultDim)) - var fields = []*entity.Field{ + fields := []*entity.Field{ pkField, vecField, } schema := &entity.Schema{ @@ -491,8 +495,10 @@ func TestCreateCollectionDynamicSchema(t *testing.T) { common.CheckContainsCollection(t, collections, collName) // insert data - dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, - start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} + dp := DataParams{ + CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false, + } _, err = insertData(ctx, t, mc, dp) common.CheckErr(t, err, true) } @@ -519,8 +525,10 @@ func TestCreateCollectionDynamic(t *testing.T) { 
common.CheckContainsCollection(t, collections, collName) // insert data - dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, - start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} + dp := DataParams{ + CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false, + } _, err = insertData(ctx, t, mc, dp) common.CheckErr(t, err, true) } @@ -693,5 +701,5 @@ func TestGetStaticsCollectionNotExisted(t *testing.T) { // flush and check row count _, errStatist := mc.GetCollectionStatistics(ctx, "collName") - common.CheckErr(t, errStatist, false, "collection collName does not exist") + common.CheckErr(t, errStatist, false, "collection not found") } diff --git a/test/testcases/compact_test.go b/test/testcases/compact_test.go index 13639bcd..3748f25f 100644 --- a/test/testcases/compact_test.go +++ b/test/testcases/compact_test.go @@ -18,14 +18,18 @@ func TestCompact(t *testing.T) { mc := createMilvusClient(ctx, t) // create collection with 1 shard - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: 1, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: 1, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) // insert for i := 0; i < 4; i++ { - dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: AllFields, - start: i * common.DefaultNb, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} + dp := DataParams{ + CollectionName: collName, PartitionName: "", CollectionFieldsType: AllFields, + start: i * common.DefaultNb, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false, + } _, _ = insertData(ctx, t, mc, dp) mc.Flush(ctx, collName, false) } @@ -87,7 +91,7 @@ func TestCompactCollectionNotExist(t *testing.T) { mc := createMilvusClient(ctx, t) _, err := mc.Compact(ctx, "coll", 0) - common.CheckErr(t, err, false, "collection coll does not exist") + common.CheckErr(t, err, false, "can't find collection") } // test compact empty collection diff --git a/test/testcases/delete_test.go b/test/testcases/delete_test.go index 21745baf..740c6c8a 100644 --- a/test/testcases/delete_test.go +++ b/test/testcases/delete_test.go @@ -86,7 +86,7 @@ func TestDeleteNotExistCollection(t *testing.T) { // flush and check row count deleteIds := entity.NewColumnInt64(common.DefaultIntFieldName, []int64{0, 1}) errDelete := mc.DeleteByPks(ctx, "collName", common.DefaultPartition, deleteIds) - common.CheckErr(t, errDelete, false, "collection collName does not exist") + common.CheckErr(t, errDelete, false, "can't find collection") } // test delete from an not exist partition @@ -105,7 +105,7 @@ func TestDeleteNotExistPartition(t *testing.T) { // delete deleteIds := ids.Slice(0, 10) errDelete := mc.DeleteByPks(ctx, collName, "p1", deleteIds) - common.CheckErr(t, errDelete, false, fmt.Sprintf("partition p1 of collection %s does not exist", collName)) + common.CheckErr(t, errDelete, false, "partition not found") } // test delete empty partition names diff --git a/test/testcases/flush_test.go b/test/testcases/flush_test.go index 2aa37938..15ae25f9 100644 --- a/test/testcases/flush_test.go +++ b/test/testcases/flush_test.go @@ -57,7 +57,7 @@ func 
TestFlushNotExistedCollection(t *testing.T) { // flush and check row count errFlush := mc.Flush(ctx, "collName", false) - common.CheckErr(t, errFlush, false, "collection collName does not exist") + common.CheckErr(t, errFlush, false, "collection not found") } // test flush async diff --git a/test/testcases/index_test.go b/test/testcases/index_test.go index 1022530b..090bc181 100644 --- a/test/testcases/index_test.go +++ b/test/testcases/index_test.go @@ -1288,7 +1288,7 @@ func TestCreateIndexNotExistCollName(t *testing.T) { // create index idx, _ := entity.NewIndexHNSW(entity.L2, 8, 96) err := mc.CreateIndex(ctx, "haha", common.DefaultFloatVecFieldName, idx, false) - common.CheckErr(t, err, false, "collection haha does not exist") + common.CheckErr(t, err, false, fmt.Sprintf("collection %s does not exist", "haha")) } func TestCreateIndexNotExistField(t *testing.T) { diff --git a/test/testcases/iterator_test.go b/test/testcases/iterator_test.go new file mode 100644 index 00000000..f4230055 --- /dev/null +++ b/test/testcases/iterator_test.go @@ -0,0 +1,53 @@ +//go:build L0 + +package testcases + +// func TestSearchIteratorDefault(t *testing.T) { +// ctx := createContext(t, time.Second*common.DefaultTimeout) +// // connect +// mc := createMilvusClient(ctx, t) + +// // create collection +// cp := CollectionParams{CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: true, +// ShardsNum: common.DefaultShards, Dim: common.DefaultDim} +// collName := createCollection(ctx, t, mc, cp) + +// // insert +// dpColumns := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, +// start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} +// _, _ = insertData(ctx, t, mc, dpColumns) + +// mc.Flush(ctx, collName, false) + +// /* dpRows := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, +// start: common.DefaultNb, nb: common.DefaultNb * 2, dim: common.DefaultDim, EnableDynamicField: true, WithRows: true} +// _, _ = insertData(ctx, t, mc, dpRows)*/ + +// idx, _ := entity.NewIndexHNSW(entity.COSINE, 8, 96) +// _ = mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) + +// // Load collection +// errLoad := mc.LoadCollection(ctx, collName, false) +// common.CheckErr(t, errLoad, true) + +// // search iterator with default batch +// sp, _ := entity.NewIndexHNSWSearchParam(80) +// queryVec := common.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector) +// opt := client.NewSearchIteratorOption(collName, common.DefaultFloatVecFieldName, sp, queryVec[0], entity.COSINE) +// itr, err := mc.SearchIterator(ctx, opt) +// common.CheckErr(t, err, true) +// // common.CheckSearchIteratorResult(ctx, t, itr, common.DefaultNb) +// var cnt int +// for { +// sr, err := itr.Next(ctx) +// if err != nil { +// if err == io.EOF { +// break +// } +// t.FailNow() +// } +// cnt += sr.IDs.Len() +// time.Sleep(time.Second) +// } +// log.Println(cnt) +// } diff --git a/test/testcases/load_release_test.go b/test/testcases/load_release_test.go index 05752eeb..7fee03a3 100644 --- a/test/testcases/load_release_test.go +++ b/test/testcases/load_release_test.go @@ -20,7 +20,7 @@ import ( // test load collection func TestLoadCollection(t *testing.T) { - //t.Skip("Issue: https://github.com/milvus-io/milvus-sdk-go/issues/374") + // t.Skip("Issue: https://github.com/milvus-io/milvus-sdk-go/issues/374") ctx := createContext(t, time.Second*common.DefaultTimeout) // connect mc 
:= createMilvusClient(ctx, t) @@ -39,7 +39,7 @@ func TestLoadCollection(t *testing.T) { // check collection loaded collection, _ := mc.DescribeCollection(ctx, collName) log.Println(collection.Loaded) - //require.True(t, collection.Loaded) + // require.True(t, collection.Loaded) } // test load not existed collection @@ -50,7 +50,7 @@ func TestLoadCollectionNotExist(t *testing.T) { // Load collection errLoad := mc.LoadCollection(ctx, "collName", false) - common.CheckErr(t, errLoad, false, "exist") + common.CheckErr(t, errLoad, false, "collection not found") } // test load collection async @@ -98,7 +98,7 @@ func TestLoadCollectionWithoutIndex(t *testing.T) { // load collection with multi partitions func TestLoadCollectionMultiPartitions(t *testing.T) { - //t.Skip("Issue: https://github.com/milvus-io/milvus-sdk-go/issues/374") + // t.Skip("Issue: https://github.com/milvus-io/milvus-sdk-go/issues/374") ctx := createContext(t, time.Second*common.DefaultTimeout*3) // connect mc := createMilvusClient(ctx, t) @@ -119,18 +119,17 @@ func TestLoadCollectionMultiPartitions(t *testing.T) { partitions, _ := mc.ShowPartitions(ctx, collName) for _, partition := range partitions { log.Println(partition.Loaded) - //require.True(t, partition.Loaded) + // require.True(t, partition.Loaded) } // check collection loaded collection, _ := mc.DescribeCollection(ctx, collName) log.Println(collection.Loaded) - //require.True(t, collection.Loaded) + // require.True(t, collection.Loaded) } // test load with empty partition name "" func TestLoadEmptyPartitionName(t *testing.T) { - //t.Skip("Issue: https://github.com/milvus-io/milvus-sdk-go/issues/373") ctx := createContext(t, time.Second*common.DefaultTimeout*3) // connect mc := createMilvusClient(ctx, t) @@ -145,7 +144,7 @@ func TestLoadEmptyPartitionName(t *testing.T) { // load partition with empty partition names errLoadEmpty := mc.LoadPartitions(ctx, collName, []string{""}, false) - common.CheckErr(t, errLoadEmpty, false, "request failed") + common.CheckErr(t, errLoadEmpty, false, "partition not found[partition=]") } // test load partitions with empty slice []string{} @@ -183,11 +182,11 @@ func TestLoadPartitionsNotExist(t *testing.T) { // load with not exist partition names errLoadNotExist := mc.LoadPartitions(ctx, collName, []string{"xxx"}, false) - common.CheckErr(t, errLoadNotExist, false, fmt.Sprintf("partition xxx of collection %s does not exist", collName)) + common.CheckErr(t, errLoadNotExist, false, "partition not found") // load partition with part exist partition names errLoadPartExist := mc.LoadPartitions(ctx, collName, []string{"xxx", partitionName}, false) - common.CheckErr(t, errLoadPartExist, false, fmt.Sprintf("partition xxx of collection %s does not exist", collName)) + common.CheckErr(t, errLoadPartExist, false, "partition not found") } // test load partition @@ -216,16 +215,16 @@ func TestLoadPartitions(t *testing.T) { partitions, _ := mc.ShowPartitions(ctx, collName) for _, p := range partitions { if p.Name == partitionName { - //require.True(t, p.Loaded) + // require.True(t, p.Loaded) log.Println(p.Loaded) } else { log.Println(p.Loaded) - //require.True(t, p.Loaded) + // require.True(t, p.Loaded) } log.Printf("id: %d, name: %s, loaded %t", p.ID, p.Name, p.Loaded) } - //query from nb from partition + // query from nb from partition queryIds := entity.NewColumnInt64(common.DefaultIntFieldName, []int64{0, int64(nb)}) queryResultPartition, _ := mc.QueryByPks(ctx, collName, []string{}, queryIds, []string{common.DefaultIntFieldName}) 
common.CheckQueryResult(t, queryResultPartition, []entity.Column{ @@ -251,7 +250,7 @@ func TestLoadMultiPartitions(t *testing.T) { errLoad := mc.LoadPartitions(ctx, collName, []string{common.DefaultPartition}, false) common.CheckErr(t, errLoad, true) - //query nb from default partition + // query nb from default partition resDef, _ := mc.Query(ctx, collName, []string{common.DefaultPartition}, "", []string{common.QueryCountFieldName}) require.EqualValues(t, common.DefaultNb, resDef.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) @@ -283,7 +282,7 @@ func TestLoadPartitionsRepeatedly(t *testing.T) { errLoadRepeat := mc.LoadPartitions(ctx, collName, []string{partitionName, partitionName}, false) common.CheckErr(t, errLoadRepeat, true) - //query from nb from partition + // query from nb from partition queryIds := entity.NewColumnInt64(common.DefaultIntFieldName, []int64{common.DefaultNb}) queryResultPartition, _ := mc.QueryByPks(ctx, collName, []string{}, queryIds, []string{common.DefaultIntFieldName}) common.CheckQueryResult(t, queryResultPartition, []entity.Column{queryIds}) @@ -333,13 +332,17 @@ func TestLoadCollectionMultiVectors(t *testing.T) { mc := createMilvusClient(ctx, t) // create collection - cp := CollectionParams{CollectionFieldsType: AllVectors, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllVectors, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) // insert - dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: AllVectors, - start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} + dp := DataParams{ + CollectionName: collName, PartitionName: "", CollectionFieldsType: AllVectors, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false, + } _, _ = insertData(ctx, t, mc, dp) mc.Flush(ctx, collName, false) @@ -372,7 +375,7 @@ func TestReleasePartition(t *testing.T) { // create collection -> insert data -> create index collName, _ := createCollectionWithDataIndex(ctx, t, mc, true, true) - //load collection + // load collection errLoad := mc.LoadCollection(ctx, collName, true) common.CheckErr(t, errLoad, true) @@ -400,13 +403,13 @@ func TestReleaseCollectionNotExist(t *testing.T) { // create collection -> insert data -> create index collName, _ := createCollectionWithDataIndex(ctx, t, mc, true, true) - //load collection + // load collection errLoad := mc.LoadCollection(ctx, collName, true) common.CheckErr(t, errLoad, true) // release collection errRelease := mc.ReleaseCollection(ctx, "collName") - common.CheckErr(t, errRelease, false, "not exist") + common.CheckErr(t, errRelease, false, "collection not found") } // test release partitions @@ -467,11 +470,11 @@ func TestReleasePartitionsNotExist(t *testing.T) { // release partition errRelease := mc.ReleasePartitions(ctx, collName, []string{"partitionName"}) - common.CheckErr(t, errRelease, false, "not exist") + common.CheckErr(t, errRelease, false, "partition not found") // release partition errRelease2 := mc.ReleasePartitions(ctx, collName, []string{"partitionName", partitionName}) - common.CheckErr(t, errRelease2, false, "not exist") + common.CheckErr(t, errRelease2, false, "partition not found") // check release success partitions, _ := mc.ShowPartitions(ctx, collName) @@ -530,12 +533,16 @@ 
func TestMmapCollectionIndexDefault(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) - dp := DataParams{DoInsert: true, CollectionName: collName, CollectionFieldsType: AllFields, start: 0, - nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionName: collName, CollectionFieldsType: AllFields, start: 0, + nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, + } insertData(ctx, t, mc, dp) _ = mc.Flush(ctx, collName, false) @@ -575,7 +582,7 @@ func TestMmapCollectionIndexDefault(t *testing.T) { err := mc.AlterCollection(ctx, collName, entity.Mmap(true)) common.CheckErr(t, err, true) - //describe collection + // describe collection mc.LoadCollection(ctx, collName, false) coll, _ = mc.DescribeCollection(ctx, collName) require.Equal(t, "true", coll.Properties["mmap.enabled"]) @@ -612,11 +619,15 @@ func TestMmapAlterCollectionIndexDefault(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: AllFields, start: 0, - nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: AllFields, start: 0, + nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, + } ips := GenDefaultIndexParamsForAllVectors() collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithIndexParams(ips), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) @@ -628,7 +639,7 @@ func TestMmapAlterCollectionIndexDefault(t *testing.T) { require.Equal(t, "", indexes[0].Params()["mmap.enabled"]) } - //describe collection + // describe collection mc.LoadCollection(ctx, collName, false) coll, _ := mc.DescribeCollection(ctx, collName) require.Equal(t, "", coll.Properties["mmap.enabled"]) @@ -643,7 +654,7 @@ func TestMmapAlterCollectionIndexDefault(t *testing.T) { } // load collection -> describe collection and check mmap false - //describe collection + // describe collection mc.LoadCollection(ctx, collName, false) coll, _ = mc.DescribeCollection(ctx, collName) require.Equal(t, "true", coll.Properties["mmap.enabled"]) @@ -683,11 +694,15 @@ func TestMmapCollectionLoaded(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: Int64FloatVec, start: 0, nb: common.DefaultNb, - dim: common.DefaultDim, EnableDynamicField: false} 
+ dp := DataParams{ + DoInsert: true, CollectionFieldsType: Int64FloatVec, start: 0, nb: common.DefaultNb, + dim: common.DefaultDim, EnableDynamicField: false, + } collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) @@ -707,11 +722,15 @@ func TestMmapCollectionScalarIndexed(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: Int64FloatVec, start: 0, nb: common.DefaultNb, - dim: common.DefaultDim, EnableDynamicField: false} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: Int64FloatVec, start: 0, nb: common.DefaultNb, + dim: common.DefaultDim, EnableDynamicField: false, + } collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) // create scalar index @@ -744,11 +763,15 @@ func TestMmapScalarInvertedIndex(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, - dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, + dim: common.DefaultDim, EnableDynamicField: true, + } // build vector's indexes ips := GenDefaultIndexParamsForAllVectors() @@ -756,7 +779,7 @@ func TestMmapScalarInvertedIndex(t *testing.T) { collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithIndexParams(ips), WithLoadParams(lp), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) - //create scalar index with mmap + // create scalar index with mmap collection, _ := mc.DescribeCollection(ctx, collName) for _, field := range collection.Schema.Fields { if SupportScalarIndexFieldType(field.DataType) { @@ -799,11 +822,15 @@ func TestMmapScalarBitmapIndex(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, - dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, + dim: common.DefaultDim, EnableDynamicField: true, + } // build vector's indexes ips := GenDefaultIndexParamsForAllVectors() @@ -811,7 +838,7 @@ func TestMmapScalarBitmapIndex(t *testing.T) { collName := prepareCollection(ctx, t, mc, 
cp, WithDataParams(dp), WithIndexParams(ips), WithLoadParams(lp), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) - //create scalar index with mmap + // create scalar index with mmap collection, _ := mc.DescribeCollection(ctx, collName) BitmapNotSupport := []interface{}{entity.FieldTypeJSON, entity.FieldTypeDouble, entity.FieldTypeFloat} for _, field := range collection.Schema.Fields { @@ -855,11 +882,15 @@ func TestMmapScalarHybirdIndex(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, - dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: AllFields, start: 0, nb: common.DefaultNb, + dim: common.DefaultDim, EnableDynamicField: true, + } // build vector's indexes ips := GenDefaultIndexParamsForAllVectors() @@ -867,7 +898,7 @@ func TestMmapScalarHybirdIndex(t *testing.T) { collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithIndexParams(ips), WithLoadParams(lp), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) - //create scalar index with mmap + // create scalar index with mmap collection, _ := mc.DescribeCollection(ctx, collName) for _, field := range collection.Schema.Fields { if SupportScalarIndexFieldType(field.DataType) { @@ -909,11 +940,13 @@ func TestMmapIndexUnsupported(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) - //create index with mmap + // create index with mmap idx, _ := entity.NewIndexDISKANN(entity.COSINE) err := mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false, client.WithMmap(true)) common.CheckErr(t, err, false, "index type DISKANN does not support mmap") @@ -932,12 +965,16 @@ func TestMmapScalarAutoIndex(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) - dp := DataParams{DoInsert: true, CollectionName: collName, CollectionFieldsType: AllFields, start: 0, - nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: false} + dp := DataParams{ + DoInsert: true, CollectionName: collName, CollectionFieldsType: AllFields, start: 0, + nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: false, + } insertData(ctx, t, mc, dp) mc.Flush(ctx, collName, false) @@ -956,8 +993,10 @@ func TestAlterIndexMmapUnsupportedIndex(t *testing.T) { mc := 
createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: AllFields, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) // diskAnn @@ -998,12 +1037,16 @@ func TestMmapAlterIndex(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + cp := CollectionParams{ + CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: false, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, + } collName := createCollection(ctx, t, mc, cp) - dp := DataParams{DoInsert: true, CollectionName: collName, CollectionFieldsType: Int64FloatVec, start: 0, - nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: false} + dp := DataParams{ + DoInsert: true, CollectionName: collName, CollectionFieldsType: Int64FloatVec, start: 0, + nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: false, + } insertData(ctx, t, mc, dp) mc.Flush(ctx, collName, false) @@ -1043,11 +1086,15 @@ func TestMmapSparseCollection(t *testing.T) { mc := createMilvusClient(ctx, t) // create -> insert [0, 3000) -> flush -> index -> load - cp := CollectionParams{CollectionFieldsType: Int64VarcharSparseVec, AutoID: false, EnableDynamicField: true, - ShardsNum: common.DefaultShards, Dim: common.DefaultDim, MaxLength: common.TestMaxLen} + cp := CollectionParams{ + CollectionFieldsType: Int64VarcharSparseVec, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, MaxLength: common.TestMaxLen, + } - dp := DataParams{DoInsert: true, CollectionFieldsType: Int64VarcharSparseVec, start: 0, nb: common.DefaultNb * 5, - dim: common.DefaultDim, EnableDynamicField: true} + dp := DataParams{ + DoInsert: true, CollectionFieldsType: Int64VarcharSparseVec, start: 0, nb: common.DefaultNb * 5, + dim: common.DefaultDim, EnableDynamicField: true, + } // index params idxHnsw, _ := entity.NewIndexHNSW(entity.L2, 8, 96) @@ -1070,8 +1117,10 @@ func TestMmapSparseCollection(t *testing.T) { common.CheckErr(t, err, true) // search with floatVec field - outputFields := []string{common.DefaultIntFieldName, common.DefaultVarcharFieldName, common.DefaultFloatVecFieldName, - common.DefaultSparseVecFieldName, common.DefaultDynamicFieldName} + outputFields := []string{ + common.DefaultIntFieldName, common.DefaultVarcharFieldName, common.DefaultFloatVecFieldName, + common.DefaultSparseVecFieldName, common.DefaultDynamicFieldName, + } queryVecFloat := common.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector) sp, _ := entity.NewIndexSparseInvertedSearchParam(0) resSearch, errSearch := mc.Search(ctx, collName, []string{}, "", []string{"*"}, queryVecFloat, common.DefaultFloatVecFieldName, diff --git a/test/testcases/resource_group_test.go b/test/testcases/resource_group_test.go index 6c3cf92c..d69aa9c1 100644 --- a/test/testcases/resource_group_test.go +++ b/test/testcases/resource_group_test.go @@ -389,7 +389,7 @@ func TestTransferReplicaNotExistedCollection(t *testing.T) { // transfer replica errTransfer := mc.TransferReplica(ctx, common.DefaultRgName, rgName, 
common.GenRandomString(3), 1) - common.CheckErr(t, errTransfer, false, "collection not found") + common.CheckErr(t, errTransfer, false, "can't find collection") } // test transfer replicas with invalid replica number diff --git a/test/testcases/ut.log b/test/testcases/ut.log new file mode 100644 index 00000000..1b71b16f --- /dev/null +++ b/test/testcases/ut.log @@ -0,0 +1,109 @@ +2024/08/20 13:05:46 main_test.go:542: parse addr=localhost:19530 +=== RUN TestQueryArrayFieldExpr +2024/08/20 13:05:46 milvus_client.go:14: (ApiRequest): func [NewDefaultGrpcClient], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m59.999895469s]) localhost:19530] +2024/08/20 13:05:46 milvus_client.go:21: (ApiResponse): func [NewDefaultGrpcClient], results: [0xc0001860e0] +2024/08/20 13:05:46 milvus_client.go:14: (ApiRequest): func [CreateCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m59.990038188s]) eYrs 0xc000340d70 2 [0xfca7c0]] +2024/08/20 13:05:46 milvus_client.go:21: (ApiResponse): func [CreateCollection], results: [] +2024/08/20 13:05:47 utils.go:493: expect 100 +2024/08/20 13:05:47 milvus_client.go:14: (ApiRequest): func [InsertRows], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m59.102051463s]) eYrs 3000] +2024/08/20 13:05:49 milvus_client.go:21: (ApiResponse): func [InsertRows], results: [0xc01f6668d0] +2024/08/20 13:05:49 milvus_client.go:14: (ApiRequest): func [Flush], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m56.858153415s]) eYrs false] +2024/08/20 13:05:52 milvus_client.go:21: (ApiResponse): func [Flush], results: [] +2024/08/20 13:05:52 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m54.411170465s]) eYrs floatVec false 0xc00028a9e0 []] +2024/08/20 13:05:56 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:05:56 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m50.194304676s]) eYrs binaryVec false 0xc01f72c0f0 []] +2024/08/20 13:06:00 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:00 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m46.180499627s]) eYrs fp16Vec false 0xc00028a9e0 []] +2024/08/20 13:06:02 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:02 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m44.151899363s]) eYrs bf16Vec false 0xc00028a9e0 []] +2024/08/20 13:06:04 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:04 milvus_client.go:14: (ApiRequest): func [LoadCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m42.227921864s]) eYrs []] +2024/08/20 13:06:05 milvus_client.go:21: (ApiResponse): func [LoadCollection], results: [] +2024/08/20 13:06:05 query_test.go:832: json_contains (int16Array, 100) +2024/08/20 13:06:05 milvus_client.go:14: (ApiRequest): func [Query], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST 
m=+120.024016181 [1m41.185980494s]) eYrs [] json_contains (int16Array, 100) [count(*)] []] +2024/08/20 13:06:05 milvus_client.go:21: (ApiResponse): func [Query], results: [[0xc01f6fe4b0]] +2024/08/20 13:06:05 query_test.go:835: type:Int64 field_name:"count(*)" scalars: > +2024/08/20 13:06:05 milvus_client.go:14: (ApiRequest): func [ReleaseCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m40.688556826s]) eYrs] +2024/08/20 13:06:06 milvus_client.go:21: (ApiResponse): func [ReleaseCollection], results: [] +2024/08/20 13:06:06 milvus_client.go:14: (ApiRequest): func [DescribeCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m40.45917764s]) eYrs] +2024/08/20 13:06:06 milvus_client.go:21: (ApiResponse): func [DescribeCollection], results: [0xc0002e5500] +2024/08/20 13:06:06 utils.go:1457: Collection eYrs all fileds: [int64 bool int8 int16 int32 float double varchar json floatVec fp16Vec bf16Vec binaryVec boolArray int8Array int16Array int32Array int64Array floatArray doubleArray varcharArray] +2024/08/20 13:06:06 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m40.453045367s]) eYrs boolArray false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:09 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:09 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m37.214635597s]) eYrs int8Array false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:12 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:12 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m34.187870869s]) eYrs int16Array false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:15 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:15 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m31.178102105s]) eYrs int32Array false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:18 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:18 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m28.138935868s]) eYrs int64Array false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:21 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:21 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m25.208644424s]) eYrs varcharArray false 0xc01f7e47e0 [0xfca4e0]] +2024/08/20 13:06:24 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:24 milvus_client.go:14: (ApiRequest): func [LoadCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m22.184108133s]) eYrs []] +2024/08/20 13:06:24 milvus_client.go:21: (ApiResponse): func [LoadCollection], results: [] +2024/08/20 13:06:24 query_test.go:862: json_contains (int16Array, 100) +2024/08/20 13:06:24 milvus_client.go:14: (ApiRequest): func [Query], args: 
[context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m22.144191242s]) eYrs [] json_contains (int16Array, 100) [count(*)] []] +2024/08/20 13:06:27 milvus_client.go:21: (ApiResponse): func [Query], results: [[0xc01fa488d0]] +2024/08/20 13:06:27 query_test.go:865: type:Int64 field_name:"count(*)" scalars: > +2024/08/20 13:06:27 milvus_client.go:14: (ApiRequest): func [CreateCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m19.121066335s]) BDcb 0xc01fa43450 2 [0xfca7c0]] +2024/08/20 13:06:27 milvus_client.go:21: (ApiResponse): func [CreateCollection], results: [] +2024/08/20 13:06:27 utils.go:493: expect 100 +2024/08/20 13:06:27 milvus_client.go:14: (ApiRequest): func [Insert], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m18.618569037s]) BDcb [0xc01f9c7200 0xc01f9c7230 0xc01f9c7260 0xc01f9c7290 0xc01f9c72c0 0xc01f9c72f0 0xc01f9c7320 0xc01f9c7350 0xc01fafc960 0xc01fafc990 0xc01fafc9c0 0xc01fafc9f0 0xc01fafca20 0xc01fafca50 0xc01fafca80 0xc01fafcab0 0xc01fafcae0 0xc01f8ce000 0xc01f8ce030 0xc01f8ce060 0xc01f8ce090 0xc01f8ce0c0 0xc01f8ce0f0 0xc01f8ce120]] +2024/08/20 13:06:29 milvus_client.go:21: (ApiResponse): func [Insert], results: [0xc01e584210] +2024/08/20 13:06:29 milvus_client.go:14: (ApiRequest): func [Flush], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m16.710567652s]) BDcb false] +2024/08/20 13:06:32 milvus_client.go:21: (ApiResponse): func [Flush], results: [] +2024/08/20 13:06:32 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m13.85886925s]) BDcb floatVec false 0xc0001869e0 []] +2024/08/20 13:06:34 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:34 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m12.164129209s]) BDcb binaryVec false 0xc000121500 []] +2024/08/20 13:06:36 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:36 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m10.176707392s]) BDcb fp16Vec false 0xc0001869e0 []] +2024/08/20 13:06:38 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:38 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m8.167053836s]) BDcb bf16Vec false 0xc0001869e0 []] +2024/08/20 13:06:40 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:40 milvus_client.go:14: (ApiRequest): func [LoadCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m6.172877779s]) BDcb []] +2024/08/20 13:06:42 milvus_client.go:21: (ApiResponse): func [LoadCollection], results: [] +2024/08/20 13:06:42 query_test.go:832: json_contains (int16Array, 100) +2024/08/20 13:06:42 milvus_client.go:14: (ApiRequest): func [Query], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m4.525007934s]) BDcb [] json_contains (int16Array, 100) [count(*)] []] +2024/08/20 13:06:42 milvus_client.go:21: (ApiResponse): func [Query], results: 
[[0xc01e55a8a0]] +2024/08/20 13:06:42 query_test.go:835: type:Int64 field_name:"count(*)" scalars: > +2024/08/20 13:06:42 milvus_client.go:14: (ApiRequest): func [ReleaseCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m4.300823748s]) BDcb] +2024/08/20 13:06:42 milvus_client.go:21: (ApiResponse): func [ReleaseCollection], results: [] +2024/08/20 13:06:42 milvus_client.go:14: (ApiRequest): func [DescribeCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m3.890065997s]) BDcb] +2024/08/20 13:06:42 milvus_client.go:21: (ApiResponse): func [DescribeCollection], results: [0xc0003050a0] +2024/08/20 13:06:42 utils.go:1457: Collection BDcb all fileds: [int64 bool int8 int16 int32 float double varchar json floatVec fp16Vec bf16Vec binaryVec boolArray int8Array int16Array int32Array int64Array floatArray doubleArray varcharArray] +2024/08/20 13:06:42 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m3.888356072s]) BDcb boolArray false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:06:46 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:46 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [1m0.158887447s]) BDcb int8Array false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:06:49 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:49 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [57.23042059s]) BDcb int16Array false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:06:52 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:52 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [54.193458112s]) BDcb int32Array false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:06:55 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:55 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [51.170879117s]) BDcb int64Array false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:06:58 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:06:58 milvus_client.go:14: (ApiRequest): func [CreateIndex], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [48.145540585s]) BDcb varcharArray false 0xc01e446260 [0xfca4e0]] +2024/08/20 13:07:01 milvus_client.go:21: (ApiResponse): func [CreateIndex], results: [] +2024/08/20 13:07:01 milvus_client.go:14: (ApiRequest): func [LoadCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [45.218971559s]) BDcb []] +2024/08/20 13:07:01 milvus_client.go:21: (ApiResponse): func [LoadCollection], results: [] +2024/08/20 13:07:01 query_test.go:862: json_contains (int16Array, 100) +2024/08/20 13:07:01 milvus_client.go:14: (ApiRequest): func [Query], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [45.180351592s]) BDcb [] json_contains (int16Array, 100) [count(*)] []] +2024/08/20 13:07:02 milvus_client.go:21: 
(ApiResponse): func [Query], results: [[0xc01f325ef0]] +2024/08/20 13:07:02 query_test.go:865: type:Int64 field_name:"count(*)" scalars: > +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [DropCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [43.764403217s]) BDcb] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [DropCollection], results: [] +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [DropCollection], args: [context.Background.WithDeadline(2024-08-20 13:07:46.537079296 +0800 CST m=+120.024016181 [43.727240371s]) eYrs] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [DropCollection], results: [] +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [Close], args: [] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [Close], results: [] +--- PASS: TestQueryArrayFieldExpr (76.28s) +PASS +coverage: 0.0% of statements +2024/08/20 13:07:02 main_test.go:29: Start to tear down all +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [NewDefaultGrpcClient], args: [context.Background.WithDeadline(2024-08-20 13:09:02.822676352 +0800 CST m=+196.309613251 [1m59.999947033s]) localhost:19530] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [NewDefaultGrpcClient], results: [0xc01f2f8240] +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [ListDatabases], args: [context.Background.WithDeadline(2024-08-20 13:09:02.822676352 +0800 CST m=+196.309613251 [1m59.996060858s])] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [ListDatabases], results: [[{default []}]] +2024/08/20 13:07:02 milvus_client.go:14: (ApiRequest): func [Close], args: [] +2024/08/20 13:07:02 milvus_client.go:21: (ApiResponse): func [Close], results: [] +ok github.com/milvus-io/milvus-sdk-go/v2/test/testcases 77.458s
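Note on usage (editorial addendum, not part of the patch): since collection and partition existence are now validated on the server side, an SDK caller sees the "collection not found" / "can't find collection" / "partition not found" style errors directly from the failing RPC, as the updated test expectations above reflect. The following is a minimal, hedged sketch of that calling pattern; it assumes a Milvus instance at localhost:19530 (as in the test log) and uses the illustrative collection name "not_existed_coll", which does not appear in the patch. Exact error wording depends on the server version.

package main

import (
	"context"
	"log"
	"strings"
	"time"

	"github.com/milvus-io/milvus-sdk-go/v2/client"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Assumption: a Milvus server is reachable at localhost:19530.
	mc, err := client.NewDefaultGrpcClient(ctx, "localhost:19530")
	if err != nil {
		log.Fatal(err)
	}
	defer mc.Close()

	// No client-side HasCollection pre-check: a missing collection is reported
	// by the server from the LoadCollection call itself.
	if err := mc.LoadCollection(ctx, "not_existed_coll", false); err != nil {
		// Match on the server-side message; wording varies across versions
		// ("collection not found", "can't find collection", ...).
		if strings.Contains(err.Error(), "collection not found") ||
			strings.Contains(err.Error(), "can't find collection") {
			log.Printf("collection does not exist on the server: %v", err)
			return
		}
		log.Fatal(err)
	}
}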