enhance: Add nilness linter and fix some small issues (#34049)
Add `nilness` to the govet linter and fix some detected issues

Signed-off-by: Congqi Xia <[email protected]>
congqixia authored Jun 24, 2024
1 parent a1248a1 commit fd922d9
Showing 6 changed files with 10 additions and 26 deletions.
3 changes: 3 additions & 0 deletions .golangci.yml
@@ -41,6 +41,9 @@ linters-settings:
- default
- prefix(github.com/milvus-io)
custom-order: true
+  govet:
+    enable: # add extra linters
+      - nilness
gofumpt:
lang-version: "1.18"
module-path: github.com/milvus-io
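For context: `nilness` is one of govet's opt-in analyzers (`golang.org/x/tools/go/analysis/passes/nilness`). It inspects the control-flow graph and reports nil-pointer dereferences plus nil comparisons that are provably always true or always false. A minimal, hypothetical illustration of the kind of condition it reports (not code from this repository):

```go
package main

import "fmt"

func main() {
	var cache map[string]int // declared nil and never reassigned before the check

	// nilness reports a tautological condition here: cache is provably
	// nil on every path that reaches this comparison.
	if cache == nil {
		cache = make(map[string]int)
	}
	cache["hits"]++
	fmt.Println(cache["hits"])
}
```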
12 changes: 0 additions & 12 deletions internal/indexnode/task_analyze.go
@@ -74,10 +74,6 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
zap.Int64("partitionID", at.req.GetPartitionID()), zap.Int64("fieldID", at.req.GetFieldID()))

log.Info("Begin to build analyze task")
- if err != nil {
-     log.Warn("create analyze info failed", zap.Error(err))
-     return err
- }

storageConfig := &clusteringpb.StorageConfig{
Address: at.req.GetStorageConfig().GetAddress(),
@@ -103,19 +99,11 @@ func (at *analyzeTask) Execute(ctx context.Context) error {
numRows := stats.GetNumRows()
numRowsMap[segID] = numRows
log.Info("append segment rows", zap.Int64("segment id", segID), zap.Int64("rows", numRows))
- if err != nil {
-     log.Warn("append segment num rows failed", zap.Error(err))
-     return err
- }
insertFiles := make([]string, 0, len(stats.GetLogIDs()))
for _, id := range stats.GetLogIDs() {
path := metautil.BuildInsertLogPath(at.req.GetStorageConfig().RootPath,
at.req.GetCollectionID(), at.req.GetPartitionID(), segID, at.req.GetFieldID(), id)
insertFiles = append(insertFiles, path)
- if err != nil {
-     log.Warn("append insert binlog path failed", zap.Error(err))
-     return err
- }
}
segmentInsertFilesMap[segID] = &clusteringpb.InsertFiles{InsertFiles: insertFiles}
}
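All three blocks deleted above share one shape: by the time `err` is re-checked, every path on which it could be non-nil has already returned, so the condition is provably false and the branch is dead. A condensed, hypothetical sketch of the pattern (names invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

func loadRows(id int64) (int64, error) {
	if id < 0 {
		return 0, errors.New("invalid segment id")
	}
	return 42, nil
}

func main() {
	rows, err := loadRows(1)
	if err != nil {
		return // the only path on which err is non-nil exits here
	}
	fmt.Println("rows:", rows)

	// err is provably nil on every path reaching this point, so nilness
	// reports the comparison as an impossible condition; the deletions
	// above remove exactly this kind of dead branch.
	if err != nil {
		fmt.Println("unreachable:", err)
	}
}
```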
2 changes: 1 addition & 1 deletion internal/proxy/simple_rate_limiter.go
@@ -92,7 +92,7 @@ func (m *SimpleLimiter) Check(dbID int64, collectionIDToPartIDs map[int64][]int6
}

// 2. check database level rate limits
- if ret == nil && dbID != util.InvalidDBID {
+ if dbID != util.InvalidDBID {
dbRateLimiters := m.rateLimiter.GetOrCreateDatabaseLimiters(dbID, newDatabaseLimiter)
ret = dbRateLimiters.Check(rt, n)
if ret != nil {
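Judging from the surrounding context, `ret` is provably nil whenever control reaches this check (a non-nil result is presumably returned earlier in `Check`), so the `ret == nil` clause was always true — the kind of tautological condition `nilness` reports — and dropping it leaves the behavior unchanged.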
4 changes: 2 additions & 2 deletions internal/querynodev2/segments/mock_data.go
@@ -465,7 +465,7 @@ func SaveBinLog(ctx context.Context,

k := JoinIDPath(collectionID, partitionID, segmentID, fieldID)
key := path.Join(chunkManager.RootPath(), "stats-log", k)
- kvs[key] = blob.Value[:]
+ kvs[key] = blob.Value
statsBinlog = append(statsBinlog, &datapb.FieldBinlog{
FieldID: fieldID,
Binlogs: []*datapb.Binlog{{LogPath: key}},
@@ -653,7 +653,7 @@ func SaveDeltaLog(collectionID int64,
key := JoinIDPath(collectionID, partitionID, segmentID, pkFieldID)
// keyPath := path.Join(defaultLocalStorage, "delta-log", key)
keyPath := path.Join(cm.RootPath(), "delta-log", key)
- kvs[keyPath] = blob.Value[:]
+ kvs[keyPath] = blob.Value
fieldBinlog = append(fieldBinlog, &datapb.FieldBinlog{
FieldID: pkFieldID,
Binlogs: []*datapb.Binlog{{
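The `[:]` removals here are behavior-preserving: a full slice expression on a value that is already a slice yields a slice with the same pointer, length, and capacity, so `blob.Value[:]` and `blob.Value` are interchangeable (`[:]` is only needed to turn an array into a slice). A quick sketch:

```go
package main

import "fmt"

func main() {
	b := []byte("hello")
	c := b[:] // no-op on a slice: same backing array, length, and capacity

	c[0] = 'H' // the write is visible through b as well
	fmt.Println(string(b), len(b) == len(c), cap(b) == cap(c))
	// Output: Hello true true
}
```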
7 changes: 0 additions & 7 deletions internal/storage/azure_object_storage.go
@@ -174,13 +174,6 @@ func (b *BlobReader) Seek(offset int64, whence int) (int64, error) {
}

func (AzureObjectStorage *AzureObjectStorage) GetObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) {
- opts := azblob.DownloadStreamOptions{}
- if offset > 0 {
-     opts.Range = azblob.HTTPRange{
-         Offset: offset,
-         Count: size,
-     }
- }
return NewBlobReader(AzureObjectStorage.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName), offset)
}
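The deleted block assembled an `azblob.DownloadStreamOptions` value that, as far as the visible code shows, was never passed to any call — `NewBlobReader` receives only the blob client and the offset — so the options struct was write-only dead code, and removing it does not change what `GetObject` returns.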

8 changes: 4 additions & 4 deletions internal/storage/print_binlog_test.go
@@ -441,10 +441,10 @@ func TestPrintDDFiles(t *testing.T) {
dropPartitionString, err := proto.Marshal(&dropPartitionReq)
assert.NoError(t, err)
ddRequests := []string{
- string(createCollString[:]),
- string(dropCollString[:]),
- string(createPartitionString[:]),
- string(dropPartitionString[:]),
+ string(createCollString),
+ string(dropCollString),
+ string(createPartitionString),
+ string(dropPartitionString),
}
eventTypeCodes := []EventTypeCode{
CreateCollectionEventType,
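Same no-op-slice cleanup as in mock_data.go: `proto.Marshal` returns a `[]byte`, and `string(b[:])` is identical to `string(b)` because `[:]` on a slice changes nothing, so dropping the slice expression loses nothing.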