From fd922d921a545eeb5a337573d5c38b3062767558 Mon Sep 17 00:00:00 2001
From: congqixia
Date: Mon, 24 Jun 2024 14:52:03 +0800
Subject: [PATCH] enhance: Add nilness linter and fix some small issues
 (#34049)

Add `nilness` to the govet linter and fix some detected issues.

Signed-off-by: Congqi Xia
---
 .golangci.yml                              |  3 +++
 internal/indexnode/task_analyze.go         | 12 ------------
 internal/proxy/simple_rate_limiter.go      |  2 +-
 internal/querynodev2/segments/mock_data.go |  4 ++--
 internal/storage/azure_object_storage.go   |  7 -------
 internal/storage/print_binlog_test.go      |  8 ++++----
 6 files changed, 10 insertions(+), 26 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 91895ce0cc115..40320aef714d1 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -41,6 +41,9 @@ linters-settings:
       - default
       - prefix(github.com/milvus-io)
     custom-order: true
+  govet:
+    enable: # add extra linters
+      - nilness
   gofumpt:
     lang-version: "1.18"
     module-path: github.com/milvus-io
diff --git a/internal/indexnode/task_analyze.go b/internal/indexnode/task_analyze.go
index 3608ec3519755..e78d1dfbb2019 100644
--- a/internal/indexnode/task_analyze.go
+++ b/internal/indexnode/task_analyze.go
@@ -74,10 +74,6 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
 		zap.Int64("partitionID", at.req.GetPartitionID()),
 		zap.Int64("fieldID", at.req.GetFieldID()))
 	log.Info("Begin to build analyze task")
-	if err != nil {
-		log.Warn("create analyze info failed", zap.Error(err))
-		return err
-	}
 
 	storageConfig := &clusteringpb.StorageConfig{
 		Address: at.req.GetStorageConfig().GetAddress(),
@@ -103,19 +99,11 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
 		numRows := stats.GetNumRows()
 		numRowsMap[segID] = numRows
 		log.Info("append segment rows", zap.Int64("segment id", segID), zap.Int64("rows", numRows))
-		if err != nil {
-			log.Warn("append segment num rows failed", zap.Error(err))
-			return err
-		}
 		insertFiles := make([]string, 0, len(stats.GetLogIDs()))
 		for _, id := range stats.GetLogIDs() {
 			path := metautil.BuildInsertLogPath(at.req.GetStorageConfig().RootPath,
 				at.req.GetCollectionID(), at.req.GetPartitionID(), segID, at.req.GetFieldID(), id)
 			insertFiles = append(insertFiles, path)
-			if err != nil {
-				log.Warn("append insert binlog path failed", zap.Error(err))
-				return err
-			}
 		}
 		segmentInsertFilesMap[segID] = &clusteringpb.InsertFiles{InsertFiles: insertFiles}
 	}
diff --git a/internal/proxy/simple_rate_limiter.go b/internal/proxy/simple_rate_limiter.go
index 1803de81e6a14..65fcc8055151c 100644
--- a/internal/proxy/simple_rate_limiter.go
+++ b/internal/proxy/simple_rate_limiter.go
@@ -92,7 +92,7 @@ func (m *SimpleLimiter) Check(dbID int64, collectionIDToPartIDs map[int64][]int64,
 	}
 
 	// 2. check database level rate limits
-	if ret == nil && dbID != util.InvalidDBID {
+	if dbID != util.InvalidDBID {
 		dbRateLimiters := m.rateLimiter.GetOrCreateDatabaseLimiters(dbID, newDatabaseLimiter)
 		ret = dbRateLimiters.Check(rt, n)
 		if ret != nil {
diff --git a/internal/querynodev2/segments/mock_data.go b/internal/querynodev2/segments/mock_data.go
index 03080b870c05d..097393bebfff0 100644
--- a/internal/querynodev2/segments/mock_data.go
+++ b/internal/querynodev2/segments/mock_data.go
@@ -465,7 +465,7 @@ func SaveBinLog(ctx context.Context,
 
 		k := JoinIDPath(collectionID, partitionID, segmentID, fieldID)
 		key := path.Join(chunkManager.RootPath(), "stats-log", k)
-		kvs[key] = blob.Value[:]
+		kvs[key] = blob.Value
 		statsBinlog = append(statsBinlog, &datapb.FieldBinlog{
 			FieldID: fieldID,
 			Binlogs: []*datapb.Binlog{{LogPath: key}},
@@ -653,7 +653,7 @@ func SaveDeltaLog(collectionID int64,
 	key := JoinIDPath(collectionID, partitionID, segmentID, pkFieldID)
 	// keyPath := path.Join(defaultLocalStorage, "delta-log", key)
 	keyPath := path.Join(cm.RootPath(), "delta-log", key)
-	kvs[keyPath] = blob.Value[:]
+	kvs[keyPath] = blob.Value
 	fieldBinlog = append(fieldBinlog, &datapb.FieldBinlog{
 		FieldID: pkFieldID,
 		Binlogs: []*datapb.Binlog{{
diff --git a/internal/storage/azure_object_storage.go b/internal/storage/azure_object_storage.go
index 58b464c9673e7..66890b3e203b4 100644
--- a/internal/storage/azure_object_storage.go
+++ b/internal/storage/azure_object_storage.go
@@ -174,13 +174,6 @@ func (b *BlobReader) Seek(offset int64, whence int) (int64, error) {
 }
 
 func (AzureObjectStorage *AzureObjectStorage) GetObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) {
-	opts := azblob.DownloadStreamOptions{}
-	if offset > 0 {
-		opts.Range = azblob.HTTPRange{
-			Offset: offset,
-			Count:  size,
-		}
-	}
 	return NewBlobReader(AzureObjectStorage.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName), offset)
 }
 
diff --git a/internal/storage/print_binlog_test.go b/internal/storage/print_binlog_test.go
index 9d3b3dfb21141..0409430b32e85 100644
--- a/internal/storage/print_binlog_test.go
+++ b/internal/storage/print_binlog_test.go
@@ -441,10 +441,10 @@ func TestPrintDDFiles(t *testing.T) {
 	dropPartitionString, err := proto.Marshal(&dropPartitionReq)
 	assert.NoError(t, err)
 	ddRequests := []string{
-		string(createCollString[:]),
-		string(dropCollString[:]),
-		string(createPartitionString[:]),
-		string(dropPartitionString[:]),
+		string(createCollString),
+		string(dropCollString),
+		string(createPartitionString),
+		string(dropPartitionString),
 	}
 	eventTypeCodes := []EventTypeCode{
 		CreateCollectionEventType,
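For context, here is a minimal, self-contained sketch of the class of issue the `nilness` govet analyzer reports: a nil check on an error that is provably nil, which makes the branch dead code, mirroring the blocks removed from `task_analyze.go` above. The example is not part of the patch; `buildInsertPaths` is a hypothetical stand-in for the loop in that file.

```go
package main

import "fmt"

// buildInsertPaths is a hypothetical helper, not a Milvus function.
// err is declared but never reassigned, so it is provably nil at the
// check below; nilness reports roughly "impossible condition: nil != nil".
func buildInsertPaths(rootPath string, logIDs []int64) ([]string, error) {
	var err error
	paths := make([]string, 0, len(logIDs))
	for _, id := range logIDs {
		paths = append(paths, fmt.Sprintf("%s/insert_log/%d", rootPath, id))
		if err != nil { // dead branch: err can never be non-nil here
			return nil, err
		}
	}
	return paths, nil
}

func main() {
	paths, err := buildInsertPaths("files", []int64{100, 101})
	if err != nil {
		panic(err)
	}
	fmt.Println(paths)
}
```

With the `.golangci.yml` change in this patch (`govet.enable: [nilness]`), golangci-lint surfaces such dead branches, which is how the removed checks in `task_analyze.go` and the redundant `ret == nil` condition in `simple_rate_limiter.go` were found.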