diff --git a/internal/querynodev2/delegator/segment_pruner.go b/internal/querynodev2/delegator/segment_pruner.go
index a1ea6129cb3c9..e0ff6d041ecbf 100644
--- a/internal/querynodev2/delegator/segment_pruner.go
+++ b/internal/querynodev2/delegator/segment_pruner.go
@@ -24,6 +24,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/util/distance"
 	"github.com/milvus-io/milvus/pkg/util/funcutil"
 	"github.com/milvus-io/milvus/pkg/util/merr"
+	"github.com/milvus-io/milvus/pkg/util/timerecord"
 	"github.com/milvus-io/milvus/pkg/util/typeutil"
 )
 
@@ -48,7 +49,7 @@ func PruneSegments(ctx context.Context,
 		// no need to prune
 		return
 	}
-
+	tr := timerecord.NewTimeRecorder("PruneSegments")
 	var collectionID int64
 	var expr []byte
 	if searchReq != nil {
@@ -120,16 +121,23 @@ func PruneSegments(ctx context.Context,
 			item.Segments = newSegments
 			sealedSegments[idx] = item
 		}
+		filterRatio := float32(realFilteredSegments) / float32(totalSegNum)
 		metrics.QueryNodeSegmentPruneRatio.
 			WithLabelValues(fmt.Sprint(collectionID), fmt.Sprint(typeutil.IsVectorType(clusteringKeyField.GetDataType()))).
-			Observe(float64(realFilteredSegments / totalSegNum))
+			Observe(float64(filterRatio))
 		log.Ctx(ctx).Debug("Pruned segment for search/query",
 			zap.Int("filtered_segment_num[stats]", len(filteredSegments)),
 			zap.Int("filtered_segment_num[excluded]", realFilteredSegments),
 			zap.Int("total_segment_num", totalSegNum),
-			zap.Float32("filtered_ratio", float32(realFilteredSegments)/float32(totalSegNum)),
+			zap.Float32("filtered_ratio", filterRatio),
 		)
 	}
+
+	metrics.QueryNodeSegmentPruneLatency.WithLabelValues(fmt.Sprint(collectionID),
+		fmt.Sprint(typeutil.IsVectorType(clusteringKeyField.GetDataType()))).
+		Observe(float64(tr.ElapseSpan().Milliseconds()))
+	log.Ctx(ctx).Debug("Pruned segment for search/query",
+		zap.Duration("duration", tr.ElapseSpan()))
 }
 
 type segmentDisStruct struct {
diff --git a/pkg/metrics/querynode_metrics.go b/pkg/metrics/querynode_metrics.go
index 4675a3431f775..e46da21fb6463 100644
--- a/pkg/metrics/querynode_metrics.go
+++ b/pkg/metrics/querynode_metrics.go
@@ -374,6 +374,18 @@ var (
 			isVectorFieldLabelName,
 		})
 
+	QueryNodeSegmentPruneLatency = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: milvusNamespace,
+			Subsystem: typeutil.QueryNodeRole,
+			Name:      "segment_prune_latency",
+			Help:      "latency of segment prune",
+			Buckets:   buckets,
+		}, []string{
+			collectionIDLabelName,
+			isVectorFieldLabelName,
+		})
+
 	QueryNodeEvictedReadReqCount = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
 			Namespace: milvusNamespace,
@@ -790,6 +802,7 @@ func RegisterQueryNode(registry *prometheus.Registry) {
 	registry.MustRegister(QueryNodeDiskCacheEvictDuration)
 	registry.MustRegister(QueryNodeDiskCacheEvictGlobalDuration)
 	registry.MustRegister(QueryNodeSegmentPruneRatio)
+	registry.MustRegister(QueryNodeSegmentPruneLatency)
 	registry.MustRegister(QueryNodeApplyBFCost)
 	registry.MustRegister(QueryNodeForwardDeleteCost)
 	// Add cgo metrics
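
Note on the ratio fix: `realFilteredSegments` and `totalSegNum` are both `int`, so the old `float64(realFilteredSegments / totalSegNum)` performed integer division *before* the conversion and observed 0 for every partial prune. Computing the ratio in `float32` first, as the diff now does, preserves the fraction. A minimal standalone sketch of the difference:

```go
package main

import "fmt"

func main() {
	realFilteredSegments, totalSegNum := 37, 100

	// Integer division truncates before the float64 conversion,
	// so any partial prune (filtered < total) reports 0.
	broken := float64(realFilteredSegments / totalSegNum)

	// Converting the operands first preserves the fraction.
	fixed := float32(realFilteredSegments) / float32(totalSegNum)

	fmt.Println(broken, fixed) // 0 0.37
}
```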
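The latency measurement leans on Milvus's internal `timerecord` helper: `NewTimeRecorder` captures a start time and `ElapseSpan` returns the elapsed `time.Duration`. The sketch below is a hypothetical stand-in built on the standard library, not the actual `pkg/util/timerecord` implementation, but it shows the pattern the diff relies on:

```go
package main

import (
	"fmt"
	"time"
)

// TimeRecorder is a hypothetical stand-in for Milvus's internal
// timerecord.TimeRecorder; it captures a start time and reports the
// elapsed span on demand, matching how the diff uses it.
type TimeRecorder struct {
	name  string
	start time.Time
}

func NewTimeRecorder(name string) *TimeRecorder {
	return &TimeRecorder{name: name, start: time.Now()}
}

// ElapseSpan returns the time elapsed since the recorder was created.
func (tr *TimeRecorder) ElapseSpan() time.Duration {
	return time.Since(tr.start)
}

func main() {
	tr := NewTimeRecorder("PruneSegments")
	time.Sleep(25 * time.Millisecond) // stands in for the pruning work
	fmt.Printf("%s took %d ms\n", tr.name, tr.ElapseSpan().Milliseconds())
}
```

Note that the recorder is created right after the early-return check, so requests that skip pruning entirely never touch the histogram.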
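On the metrics side, the new `QueryNodeSegmentPruneLatency` follows the same `HistogramVec` pattern as the existing `QueryNodeSegmentPruneRatio`: build the vector with collection-ID and is-vector-field labels, register it once in `RegisterQueryNode`, and observe per request. A self-contained sketch using the upstream Prometheus client; the label values and bucket choice here are illustrative, not the exact Milvus constants:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Histogram keyed by the same labels the diff uses: collection ID and
	// whether the clustering key is a vector field. Label names and
	// DefBuckets are illustrative; Milvus passes its own shared `buckets`.
	pruneLatency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "milvus",
			Subsystem: "querynode",
			Name:      "segment_prune_latency",
			Help:      "latency of segment prune",
			Buckets:   prometheus.DefBuckets,
		}, []string{"collection_id", "is_vector_field"})

	// Registration must happen exactly once, hence the single
	// MustRegister call added to RegisterQueryNode in the diff;
	// registering the same collector twice panics.
	registry := prometheus.NewRegistry()
	registry.MustRegister(pruneLatency)

	// Record one observation, as the delegator does after pruning.
	pruneLatency.WithLabelValues("449", "true").Observe(12.0)

	// Gather and print the metric family name to confirm registration.
	mfs, err := registry.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```

One design note: the diff records this histogram in milliseconds (`Observe(float64(tr.ElapseSpan().Milliseconds()))`), consistent with the shared millisecond `buckets` slice used by Milvus's other latency metrics, rather than Prometheus's usual base unit of seconds.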