From 721b773a94ab1bb190d5f4c37cefe8ed054f0787 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Thu, 7 Nov 2024 17:39:04 +0900
Subject: [PATCH 1/6] Add remote write proto v2

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 Makefile                      |    1 +
 pkg/cortexpbv2/cortexv2.pb.go | 4340 +++++++++++++++++++++++++++++++++
 pkg/cortexpbv2/cortexv2.proto |  227 ++
 3 files changed, 4568 insertions(+)
 create mode 100644 pkg/cortexpbv2/cortexv2.pb.go
 create mode 100644 pkg/cortexpbv2/cortexv2.proto

diff --git a/Makefile b/Makefile
index 2704ee35b6..5936409ebb 100644
--- a/Makefile
+++ b/Makefile
@@ -85,6 +85,7 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe))))
 
 # Manually declared dependencies And what goes into each exe
 pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto
+pkg/cortexpbv2/cortexv2.pb.go: pkg/cortexpbv2/cortexv2.proto
 pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto
 pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto
 pkg/ingester/wal.pb.go: pkg/ingester/wal.proto
diff --git a/pkg/cortexpbv2/cortexv2.pb.go b/pkg/cortexpbv2/cortexv2.pb.go
new file mode 100644
index 0000000000..456977d170
--- /dev/null
+++ b/pkg/cortexpbv2/cortexv2.pb.go
@@ -0,0 +1,4340 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cortexv2.proto
+
+package cortexpbv2
+
+import (
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type WriteRequest_SourceEnum int32
+
+const (
+	API  WriteRequest_SourceEnum = 0
+	RULE WriteRequest_SourceEnum = 1
+)
+
+var WriteRequest_SourceEnum_name = map[int32]string{
+	0: "API",
+	1: "RULE",
+}
+
+var WriteRequest_SourceEnum_value = map[string]int32{
+	"API":  0,
+	"RULE": 1,
+}
+
+func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{0, 0}
+}
+
+type Metadata_MetricType int32
+
+const (
+	METRIC_TYPE_UNSPECIFIED    Metadata_MetricType = 0
+	METRIC_TYPE_COUNTER        Metadata_MetricType = 1
+	METRIC_TYPE_GAUGE          Metadata_MetricType = 2
+	METRIC_TYPE_HISTOGRAM      Metadata_MetricType = 3
+	METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4
+	METRIC_TYPE_SUMMARY        Metadata_MetricType = 5
+	METRIC_TYPE_INFO           Metadata_MetricType = 6
+	METRIC_TYPE_STATESET       Metadata_MetricType = 7
+)
+
+var Metadata_MetricType_name = map[int32]string{
+	0: "METRIC_TYPE_UNSPECIFIED",
+	1: "METRIC_TYPE_COUNTER",
+	2: "METRIC_TYPE_GAUGE",
+	3: "METRIC_TYPE_HISTOGRAM",
+	4: "METRIC_TYPE_GAUGEHISTOGRAM",
+	5: "METRIC_TYPE_SUMMARY",
+	6: "METRIC_TYPE_INFO",
+	7: "METRIC_TYPE_STATESET",
+}
+
+var Metadata_MetricType_value = map[string]int32{
+	"METRIC_TYPE_UNSPECIFIED":    0,
+	"METRIC_TYPE_COUNTER":        1,
+	"METRIC_TYPE_GAUGE":          2,
+	"METRIC_TYPE_HISTOGRAM":      3,
+	"METRIC_TYPE_GAUGEHISTOGRAM": 4,
+	"METRIC_TYPE_SUMMARY":        5,
+	"METRIC_TYPE_INFO":           6,
+	"METRIC_TYPE_STATESET":       7,
+}
+
+func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{5, 0}
+}
+
+type Histogram_ResetHint int32
+
+const (
+	RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0
+	RESET_HINT_YES         Histogram_ResetHint = 1
+	RESET_HINT_NO          Histogram_ResetHint = 2
+	RESET_HINT_GAUGE       Histogram_ResetHint = 3
+)
+
+var Histogram_ResetHint_name = map[int32]string{
+	0: "RESET_HINT_UNSPECIFIED",
+	1: "RESET_HINT_YES",
+	2: "RESET_HINT_NO",
+	3: "RESET_HINT_GAUGE",
+}
+
+var Histogram_ResetHint_value = map[string]int32{
+	"RESET_HINT_UNSPECIFIED": 0,
+	"RESET_HINT_YES":         1,
+	"RESET_HINT_NO":          2,
+	"RESET_HINT_GAUGE":       3,
+}
+
+func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{6, 0}
+}
+
+// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
+type WriteRequest struct {
+	Source                  WriteRequest_SourceEnum `protobuf:"varint,3,opt,name=Source,proto3,enum=cortexpbv2.WriteRequest_SourceEnum" json:"Source,omitempty"`
+	Symbols                 []string                `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
+	Timeseries              []PreallocTimeseriesV2  `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"`
+	SkipLabelNameValidation bool                    `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+}
+
+func (m *WriteRequest) Reset()      { *m = WriteRequest{} }
+func (*WriteRequest) ProtoMessage() {}
+func (*WriteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{0}
+}
+func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteRequest.Merge(m, src)
+}
+func (m *WriteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteRequest proto.InternalMessageInfo
+
+func (m *WriteRequest) GetSource() WriteRequest_SourceEnum {
+	if m != nil {
+		return m.Source
+	}
+	return API
+}
+
+func (m *WriteRequest) GetSymbols() []string {
+	if m != nil {
+		return m.Symbols
+	}
+	return nil
+}
+
+func (m *WriteRequest) GetSkipLabelNameValidation() bool {
+	if m != nil {
+		return m.SkipLabelNameValidation
+	}
+	return false
+}
+
+type WriteResponse struct {
+	// Samples represents X-Prometheus-Remote-Write-Written-Samples
+	Samples int64 `protobuf:"varint,1,opt,name=Samples,proto3" json:"Samples,omitempty"`
+	// Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+	Histograms int64 `protobuf:"varint,2,opt,name=Histograms,proto3" json:"Histograms,omitempty"`
+	// Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+	Exemplars int64 `protobuf:"varint,3,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"`
+}
+
+func (m *WriteResponse) Reset()      { *m = WriteResponse{} }
+func (*WriteResponse) ProtoMessage() {}
+func (*WriteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{1}
+}
+func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteResponse.Merge(m, src)
+}
+func (m *WriteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
+
+func (m *WriteResponse) GetSamples() int64 {
+	if m != nil {
+		return m.Samples
+	}
+	return 0
+}
+
+func (m *WriteResponse) GetHistograms() int64 {
+	if m != nil {
+		return m.Histograms
+	}
+	return 0
+}
+
+func (m *WriteResponse) GetExemplars() int64 {
+	if m != nil {
+		return m.Exemplars
+	}
+	return 0
+}
+
+type TimeSeries struct {
+	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+	// Timeseries messages can either specify samples or (native) histogram samples
+	// (histogram field), but not both. For a typical sender (real-time metric
+	// streaming), in healthy cases, there will be only one sample or histogram.
+	//
+	// Samples and histograms are sorted by timestamp (older first).
+	Samples    []Sample    `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+	Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
+	// exemplars represents an optional set of exemplars attached to this series' samples.
+	Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
+	// metadata represents the metadata associated with the given series' samples.
+	Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
+	// created_timestamp represents an optional created timestamp associated with
+	// this series' samples in ms format, typically for counter or histogram type
+	// metrics. Created timestamp represents the time when the counter started
+	// counting (sometimes referred to as start timestamp), which can increase
+	// the accuracy of query results.
+	//
+	// Note that some receivers might require this and in return fail to
+	// ingest such samples within the Request.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	//
+	// Note that the "optional" keyword is omitted due to
+	// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+	// Zero value means value not set. If you need to use exactly zero value for
+	// the timestamp, use 1 millisecond before or after.
+	CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
+}
+
+func (m *TimeSeries) Reset()      { *m = TimeSeries{} }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{2}
+}
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TimeSeries.Merge(m, src)
+}
+func (m *TimeSeries) XXX_Size() int {
+	return m.Size()
+}
+func (m *TimeSeries) XXX_DiscardUnknown() {
+	xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+
+func (m *TimeSeries) GetLabelsRefs() []uint32 {
+	if m != nil {
+		return m.LabelsRefs
+	}
+	return nil
+}
+
+func (m *TimeSeries) GetSamples() []Sample {
+	if m != nil {
+		return m.Samples
+	}
+	return nil
+}
+
+func (m *TimeSeries) GetHistograms() []Histogram {
+	if m != nil {
+		return m.Histograms
+	}
+	return nil
+}
+
+func (m *TimeSeries) GetExemplars() []Exemplar {
+	if m != nil {
+		return m.Exemplars
+	}
+	return nil
+}
+
+func (m *TimeSeries) GetMetadata() Metadata {
+	if m != nil {
+		return m.Metadata
+	}
+	return Metadata{}
+}
+
+func (m *TimeSeries) GetCreatedTimestamp() int64 {
+	if m != nil {
+		return m.CreatedTimestamp
+	}
+	return 0
+}
+
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+type Exemplar struct {
+	// labels_refs is an optional list of label name-value pair references, encoded
+	// as indices to the Request.symbols array. This list's len is always
+	// a multiple of 2, and the underlying labels should be sorted lexicographically.
+	// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+	// value represents an exact example value. This can be useful when the exemplar
+	// is attached to a histogram, which only gives an estimated value through buckets.
+	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+	// timestamp represents the timestamp of the exemplar in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *Exemplar) Reset()      { *m = Exemplar{} }
+func (*Exemplar) ProtoMessage() {}
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{3}
+}
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Exemplar) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Exemplar.Merge(m, src)
+}
+func (m *Exemplar) XXX_Size() int {
+	return m.Size()
+}
+func (m *Exemplar) XXX_DiscardUnknown() {
+	xxx_messageInfo_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+
+func (m *Exemplar) GetLabelsRefs() []uint32 {
+	if m != nil {
+		return m.LabelsRefs
+	}
+	return nil
+}
+
+func (m *Exemplar) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (m *Exemplar) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+// Sample represents series sample.
+type Sample struct {
+	// value of the sample.
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	// timestamp represents timestamp of the sample in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *Sample) Reset()      { *m = Sample{} }
+func (*Sample) ProtoMessage() {}
+func (*Sample) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{4}
+}
+func (m *Sample) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Sample) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Sample.Merge(m, src)
+}
+func (m *Sample) XXX_Size() int {
+	return m.Size()
+}
+func (m *Sample) XXX_DiscardUnknown() {
+	xxx_messageInfo_Sample.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Sample proto.InternalMessageInfo
+
+func (m *Sample) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (m *Sample) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+type Metadata struct {
+	Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpbv2.Metadata_MetricType" json:"type,omitempty"`
+	// help_ref is a reference to the Request.symbols array representing help
+	// text for the metric. Help is optional, reference should point to an empty string in
+	// such a case.
+	HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
+	// unit_ref is a reference to the Request.symbols array representing a unit
+	// for the metric. Unit is optional, reference should point to an empty string in
+	// such a case.
+	UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
+}
+
+func (m *Metadata) Reset()      { *m = Metadata{} }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{5}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *Metadata) GetType() Metadata_MetricType {
+	if m != nil {
+		return m.Type
+	}
+	return METRIC_TYPE_UNSPECIFIED
+}
+
+func (m *Metadata) GetHelpRef() uint32 {
+	if m != nil {
+		return m.HelpRef
+	}
+	return 0
+}
+
+func (m *Metadata) GetUnitRef() uint32 {
+	if m != nil {
+		return m.UnitRef
+	}
+	return 0
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+type Histogram struct {
+	// Types that are valid to be assigned to Count:
+	//
+	//	*Histogram_CountInt
+	//	*Histogram_CountFloat
+	Count isHistogram_Count `protobuf_oneof:"count"`
+	Sum   float64           `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
+	// The schema defines the bucket schema. Currently, valid numbers
+	// are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
+	// added in future for new bucketing layouts.
+	//
+	// The schema equal to -53 means custom buckets. See
+	// custom_values field description for more details.
+	//
+	// Values between -4 and 8 represent base-2 bucket schema, where 1
+	// is a bucket boundary in each case, and then each power of two is
+	// divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
+	// each bucket boundary is the previous boundary times 2^(2^-n).
+	Schema        int32   `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
+	ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
+	// Types that are valid to be assigned to ZeroCount:
+	//
+	//	*Histogram_ZeroCountInt
+	//	*Histogram_ZeroCountFloat
+	ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
+	// Negative Buckets.
+	NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
+	// Use either "negative_deltas" or "negative_counts", the former for
+	// regular histograms with integer counts, the latter for
+	// float histograms.
+	NegativeDeltas []int64   `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
+	NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
+	// Positive Buckets.
+	//
+	// In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+	// * The span offset+length points to an the index of the custom_values array
+	// or +Inf if pointing to the len of the array.
+	// * The counts and deltas have the same meaning as for exponential histograms.
+	PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
+	// Use either "positive_deltas" or "positive_counts", the former for
+	// regular histograms with integer counts, the latter for
+	// float histograms.
+	PositiveDeltas []int64             `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
+	PositiveCounts []float64           `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
+	ResetHint      Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=cortexpbv2.Histogram_ResetHint" json:"reset_hint,omitempty"`
+	// timestamp represents timestamp of the sample in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// custom_values is an additional field used by non-exponential bucketing layouts.
+	//
+	// For custom buckets (-53 schema value) custom_values specify monotonically
+	// increasing upper inclusive boundaries for the bucket counts with arbitrary
+	// widths for this histogram. In other words, custom_values represents custom,
+	// explicit bucketing that could have been converted from the classic histograms.
+	//
+	// Those bounds are then referenced by spans in positive_spans with corresponding positive
+	// counts of deltas (refer to positive_spans for more details). This way we can
+	// have encode sparse histograms with custom bucketing (many buckets are often
+	// not used).
+	//
+	// Note that for custom bounds, even negative observations are placed in the positive
+	// counts to simplify the implementation and avoid ambiguity of where to place
+	// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
+	// the zero bucket are unused, if the schema indicates custom bucketing.
+	//
+	// For each upper boundary the previous boundary represent the lower exclusive
+	// boundary for that bucket. The first element is the upper inclusive boundary
+	// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+	// This is similar to "le" label semantics on classic histograms. You may add a
+	// bucket with an upper bound of 0 to make sure that you really have no negative
+	// observations, but in practice, native histogram rendering will show both with
+	// or without first upper boundary 0 and no negative counts as the same case.
+	//
+	// The last element is not only the upper inclusive bound of the last regular
+	// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
+	CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
+}
+
+func (m *Histogram) Reset()      { *m = Histogram{} }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Histogram) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Histogram.Merge(m, src)
+}
+func (m *Histogram) XXX_Size() int {
+	return m.Size()
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+	xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+type isHistogram_Count interface {
+	isHistogram_Count()
+	Equal(interface{}) bool
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+type isHistogram_ZeroCount interface {
+	isHistogram_ZeroCount()
+	Equal(interface{}) bool
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Histogram_CountInt struct {
+	CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"`
+}
+type Histogram_CountFloat struct {
+	CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"`
+}
+type Histogram_ZeroCountInt struct {
+	ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"`
+}
+type Histogram_ZeroCountFloat struct {
+	ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"`
+}
+
+func (*Histogram_CountInt) isHistogram_Count()           {}
+func (*Histogram_CountFloat) isHistogram_Count()         {}
+func (*Histogram_ZeroCountInt) isHistogram_ZeroCount()   {}
+func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+
+func (m *Histogram) GetCount() isHistogram_Count {
+	if m != nil {
+		return m.Count
+	}
+	return nil
+}
+func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+	if m != nil {
+		return m.ZeroCount
+	}
+	return nil
+}
+
+func (m *Histogram) GetCountInt() uint64 {
+	if x, ok := m.GetCount().(*Histogram_CountInt); ok {
+		return x.CountInt
+	}
+	return 0
+}
+
+func (m *Histogram) GetCountFloat() float64 {
+	if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
+		return x.CountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetSum() float64 {
+	if m != nil {
+		return m.Sum
+	}
+	return 0
+}
+
+func (m *Histogram) GetSchema() int32 {
+	if m != nil {
+		return m.Schema
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+	if m != nil {
+		return m.ZeroThreshold
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountInt() uint64 {
+	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
+		return x.ZeroCountInt
+	}
+	return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
+		return x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (m *Histogram) GetNegativeSpans() []BucketSpan {
+	if m != nil {
+		return m.NegativeSpans
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeDeltas() []int64 {
+	if m != nil {
+		return m.NegativeDeltas
+	}
+	return nil
+}
+
+func (m *Histogram) GetNegativeCounts() []float64 {
+	if m != nil {
+		return m.NegativeCounts
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveSpans() []BucketSpan {
+	if m != nil {
+		return m.PositiveSpans
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveDeltas() []int64 {
+	if m != nil {
+		return m.PositiveDeltas
+	}
+	return nil
+}
+
+func (m *Histogram) GetPositiveCounts() []float64 {
+	if m != nil {
+		return m.PositiveCounts
+	}
+	return nil
+}
+
+func (m *Histogram) GetResetHint() Histogram_ResetHint {
+	if m != nil {
+		return m.ResetHint
+	}
+	return RESET_HINT_UNSPECIFIED
+}
+
+func (m *Histogram) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+func (m *Histogram) GetCustomValues() []float64 {
+	if m != nil {
+		return m.CustomValues
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Histogram) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Histogram_CountInt)(nil),
+		(*Histogram_CountFloat)(nil),
+		(*Histogram_ZeroCountInt)(nil),
+		(*Histogram_ZeroCountFloat)(nil),
+	}
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+type BucketSpan struct {
+	Offset int32  `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+	Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
+}
+
+func (m *BucketSpan) Reset()      { *m = BucketSpan{} }
+func (*BucketSpan) ProtoMessage() {}
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return fileDescriptor_affad2b75b7d03df, []int{7}
+}
+func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BucketSpan) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BucketSpan.Merge(m, src)
+}
+func (m *BucketSpan) XXX_Size() int {
+	return m.Size()
+}
+func (m *BucketSpan) XXX_DiscardUnknown() {
+	xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+
+func (m *BucketSpan) GetOffset() int32 {
+	if m != nil {
+		return m.Offset
+	}
+	return 0
+}
+
+func (m *BucketSpan) GetLength() uint32 {
+	if m != nil {
+		return m.Length
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterEnum("cortexpbv2.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
+	proto.RegisterEnum("cortexpbv2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value)
+	proto.RegisterEnum("cortexpbv2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
+	proto.RegisterType((*WriteRequest)(nil), "cortexpbv2.WriteRequest")
+	proto.RegisterType((*WriteResponse)(nil), "cortexpbv2.WriteResponse")
+	proto.RegisterType((*TimeSeries)(nil), "cortexpbv2.TimeSeries")
+	proto.RegisterType((*Exemplar)(nil), "cortexpbv2.Exemplar")
+	proto.RegisterType((*Sample)(nil), "cortexpbv2.Sample")
+	proto.RegisterType((*Metadata)(nil), "cortexpbv2.Metadata")
+	proto.RegisterType((*Histogram)(nil), "cortexpbv2.Histogram")
+	proto.RegisterType((*BucketSpan)(nil), "cortexpbv2.BucketSpan")
+}
+
+func init() { proto.RegisterFile("cortexv2.proto", fileDescriptor_affad2b75b7d03df) }
+
// fileDescriptor_affad2b75b7d03df is the gzip-compressed FileDescriptorProto
// for cortexv2.proto, emitted verbatim by protoc-gen-gogo. Do not modify by
// hand; regenerate from the .proto source instead.
var fileDescriptor_affad2b75b7d03df = []byte{
	// 1094 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
	0x18, 0xf6, 0x78, 0x1d, 0x7f, 0xbc, 0x89, 0xdd, 0xcd, 0xe0, 0xb6, 0xdb, 0x50, 0x36, 0xc6, 0x15,
	0x60, 0x81, 0x14, 0x24, 0x57, 0x42, 0x48, 0xad, 0x10, 0x71, 0xba, 0x89, 0x8d, 0x1a, 0x27, 0x1a,
	0x6f, 0x82, 0xc2, 0x65, 0xb5, 0xb1, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0x38, 0x6a, 0x38, 0xf1,
	0x13, 0xf8, 0x09, 0x3d, 0xf2, 0x27, 0xb8, 0xf7, 0x98, 0x0b, 0x52, 0x85, 0x44, 0x45, 0x9c, 0x4b,
	0x8f, 0xfd, 0x09, 0x68, 0x67, 0x3f, 0x93, 0x80, 0xe0, 0x36, 0xef, 0xf3, 0x3e, 0xcf, 0xec, 0x33,
	0xcf, 0xcc, 0x6b, 0x19, 0x1a, 0x63, 0x2f, 0xe0, 0xf4, 0xe5, 0x59, 0x77, 0xcb, 0x0f, 0x3c, 0xee,
	0x61, 0x88, 0x6a, 0xff, 0xf4, 0xac, 0xbb, 0xd1, 0x9c, 0x79, 0x33, 0x4f, 0xc0, 0x5f, 0x86, 0xab,
	0x88, 0xd1, 0x7e, 0x55, 0x84, 0xb5, 0xef, 0x03, 0x8b, 0x53, 0x42, 0x7f, 0x5c, 0x50, 0xc6, 0xf1,
	0x13, 0x28, 0x8f, 0xbc, 0x45, 0x30, 0xa6, 0x8a, 0xd4, 0x42, 0x9d, 0x46, 0xf7, 0xd1, 0x56, 0xb6,
	0xc7, 0x56, 0x9e, 0xb9, 0x15, 0xd1, 0x34, 0x77, 0xe1, 0x90, 0x58, 0x82, 0x15, 0xa8, 0xb0, 0x73,
	0xe7, 0xd4, 0xb3, 0x99, 0x52, 0x6a, 0x49, 0x9d, 0x1a, 0x49, 0x4a, 0xac, 0x03, 0x70, 0xcb, 0xa1,
	0x8c, 0x06, 0x16, 0x65, 0xca, 0x4a, 0x4b, 0xea, 0xac, 0x76, 0xef, 0xe5, 0xb7, 0xd6, 0x2d, 0x87,
	0x8e, 0x44, 0xb7, 0xf7, 0xf0, 0xf5, 0xdb, 0xcd, 0xc2, 0x1f, 0x6f, 0x37, 0x9b, 0x87, 0x01, 0x35,
	0x6d, 0xdb, 0x1b, 0xeb, 0xa9, 0xf2, 0xb8, 0x4b, 0x72, 0xfb, 0xe0, 0xa7, 0xb0, 0xc1, 0x5e, 0x58,
	0xbe, 0x61, 0x9b, 0xa7, 0xd4, 0x36, 0x5c, 0xd3, 0xa1, 0xc6, 0x99, 0x69, 0x5b, 0x13, 0x93, 0x5b,
	0x9e, 0xab, 0xbc, 0xab, 0xb4, 0x50, 0xa7, 0x4a, 0xee, 0x87, 0x94, 0xe7, 0x21, 0x63, 0x68, 0x3a,
	0xf4, 0x38, 0xed, 0xb7, 0x37, 0x01, 0xb2, 0x33, 0xe0, 0x0a, 0x48, 0xdb, 0x87, 0x03, 0xb9, 0x80,
	0xab, 0x50, 0x22, 0x47, 0xcf, 0x35, 0x19, 0x7d, 0x57, 0xaa, 0x22, 0x59, 0x6a, 0xcf, 0xa0, 0x1e,
	0x9f, 0x9b, 0xf9, 0x9e, 0xcb, 0xc4, 0x29, 0x47, 0xa6, 0xe3, 0xdb, 0x94, 0x29, 0xa8, 0x85, 0x3a,
	0x12, 0x49, 0x4a, 0xac, 0x02, 0xf4, 0x2d, 0xc6, 0xbd, 0x59, 0x60, 0x3a, 0x4c, 0x29, 0x8a, 0x66,
	0x0e, 0xc1, 0x0f, 0xa1, 0xa6, 0xbd, 0xa4, 0x8e, 0x6f, 0x9b, 0x01, 0x13, 0xf9, 0x4a, 0x24, 0x03,
	0xda, 0xbf, 0x15, 0x01, 0xb2, 0x18, 0xf0, 0x26, 0xac, 0x8a, 0x73, 0x31, 0x23, 0xa0, 0xd3, 0xf0,
	0x53, 0x52, 0xa7, 0x4e, 0x20, 0x82, 0x08, 0x9d, 0x32, 0xdc, 0x85, 0x0a, 0x8b, 0x7d, 0x14, 0x45,
	0xa0, 0x38, 0x1f, 0x68, 0xe4, 0xa9, 0x57, 0x0a, 0xc3, 0x24, 0x09, 0x11, 0x3f, 0x01, 0x98, 0x67,
	0x0e, 0x25, 0x21, 0xbb, 0x9b, 0x97, 0xa5, 0x6e, 0x63, 0x65, 0x8e, 0x8e, 0xbf, 0x86, 0x1a, 0x4d,
	0xed, 0x97, 0x84, 0xb6, 0x99, 0xd7, 0x26, 0x47, 0x89, 0xa5, 0x19, 0x19, 0x7f, 0x05, 0x55, 0x87,
	0x72, 0x73, 0x62, 0x72, 0x53, 0x59, 0x69, 0xa1, 0x9b, 0xc2, 0xfd, 0xb8, 0x17, 0x0b, 0x53, 0x2e,
	0xfe, 0x02, 0xd6, 0xc7, 0x01, 0x35, 0x39, 0x9d, 0x18, 0xe2, 0xda, 0xb9, 0xe9, 0xf8, 0x4a, 0x59,
	0x04, 0x27, 0xc7, 0x0d, 0x3d, 0xc1, 0xdb, 0x06, 0x54, 0x13, 0x07, 0xff, 0x1d, 0x5e, 0x13, 0x56,
	0xce, 0x4c, 0x7b, 0x41, 0xc5, 0x2d, 0x21, 0x12, 0x15, 0xe1, 0x05, 0x65, 0xdf, 0x89, 0x2f, 0x28,
	0x05, 0xda, 0x4f, 0xa1, 0x1c, 0xa5, 0x9a, 0xa9, 0xd1, 0xbf, 0xaa, 0x8b, 0x37, 0xd5, 0xbf, 0x17,
	0xa1, 0x9a, 0x1c, 0x14, 0x3f, 0x86, 0x12, 0x3f, 0xf7, 0x23, 0x7d, 0xa3, 0xbb, 0xf9, 0x4f, 0x61,
	0x84, 0x8b, 0xc0, 0x1a, 0xeb, 0xe7, 0x3e, 0x25, 0x82, 0x8c, 0x1f, 0x40, 0x75, 0x4e, 0x6d, 0x3f,
	0x3c, 0x92, 0x30, 0x57, 0x27, 0x95, 0xb0, 0x26, 0x74, 0x1a, 0xb6, 0x16, 0xae, 0xc5, 0x45, 0xab,
	0x14, 0xb5, 0xc2, 0x9a, 0xd0, 0x69, 0xfb, 0x4f, 0x04, 0x90, 0x6d, 0x85, 0x3f, 0x84, 0xfb, 0xfb,
	0x9a, 0x4e, 0x06, 0x3b, 0x86, 0x7e, 0x72, 0xa8, 0x19, 0x47, 0xc3, 0xd1, 0xa1, 0xb6, 0x33, 0xd8,
	0x1d, 0x68, 0xcf, 0xe4, 0x02, 0xbe, 0x0f, 0x1f, 0xe4, 0x9b, 0x3b, 0x07, 0x47, 0x43, 0x5d, 0x23,
	0x32, 0xc2, 0x77, 0x61, 0x3d, 0xdf, 0xd8, 0xdb, 0x3e, 0xda, 0xd3, 0xe4, 0x22, 0x7e, 0x00, 0x77,
	0xf3, 0x70, 0x7f, 0x30, 0xd2, 0x0f, 0xf6, 0xc8, 0xf6, 0xbe, 0x2c, 0x61, 0x15, 0x36, 0x6e, 0x29,
	0xb2, 0x7e, 0xe9, 0xe6, 0xa7, 0x46, 0x47, 0xfb, 0xfb, 0xdb, 0xe4, 0x44, 0x5e, 0xc1, 0x4d, 0x90,
	0xf3, 0x8d, 0xc1, 0x70, 0xf7, 0x40, 0x2e, 0x63, 0x05, 0x9a, 0xd7, 0xe8, 0xfa, 0xb6, 0xae, 0x8d,
	0x34, 0x5d, 0xae, 0xb4, 0x5f, 0x95, 0xa1, 0x96, 0xbe, 0x5a, 0xfc, 0x11, 0xd4, 0xc6, 0xde, 0xc2,
	0xe5, 0x86, 0xe5, 0x72, 0x91, 0x6e, 0xa9, 0x5f, 0x20, 0x55, 0x01, 0x0d, 0x5c, 0x8e, 0x3f, 0x86,
	0xd5, 0xa8, 0x3d, 0xb5, 0x3d, 0x93, 0x47, 0x97, 0xdf, 0x2f, 0x10, 0x10, 0xe0, 0x6e, 0x88, 0x61,
	0x19, 0x24, 0xb6, 0x70, 0x44, 0xc0, 0x88, 0x84, 0x4b, 0x7c, 0x0f, 0xca, 0x6c, 0x3c, 0xa7, 0x8e,
	0x29, 0xa2, 0x5d, 0x27, 0x71, 0x85, 0x3f, 0x81, 0xc6, 0x4f, 0x34, 0xf0, 0x0c, 0x3e, 0x0f, 0x28,
	0x9b, 0x7b, 0xf6, 0x44, 0xbc, 0x6d, 0x44, 0xea, 0x21, 0xaa, 0x27, 0x20, 0xfe, 0x34, 0xa6, 0x65,
	0xbe, 0xca, 0xc2, 0x17, 0x22, 0x6b, 0x21, 0xbe, 0x93, 0x78, 0xfb, 0x1c, 0xe4, 0x1c, 0x2f, 0x32,
	0x58, 0x11, 0x06, 0x11, 0x69, 0xa4, 0xcc, 0xc8, 0xe4, 0x0e, 0x34, 0x5c, 0x3a, 0x33, 0xb9, 0x75,
	0x46, 0x0d, 0xe6, 0x9b, 0x2e, 0x53, 0xaa, 0xb7, 0x7f, 0x53, 0x7b, 0x8b, 0xf1, 0x0b, 0xca, 0x47,
	0xbe, 0xe9, 0xc6, 0x83, 0x55, 0x4f, 0x34, 0x21, 0xc6, 0xf0, 0x67, 0x70, 0x27, 0xdd, 0x64, 0x42,
	0x6d, 0x6e, 0x32, 0xa5, 0xd6, 0x92, 0x3a, 0x98, 0xa4, 0x7b, 0x3f, 0x13, 0xe8, 0x35, 0xa2, 0x70,
	0xc7, 0x14, 0x68, 0x49, 0x1d, 0x94, 0x11, 0x85, 0x35, 0x16, 0xda, 0xf2, 0x3d, 0x66, 0xe5, 0x6c,
	0xad, 0xfe, 0x1f, 0x5b, 0x89, 0x26, 0xb5, 0x95, 0x6e, 0x12, 0xdb, 0x5a, 0x8b, 0x6c, 0x25, 0x70,
	0x66, 0x2b, 0x25, 0xc6, 0xb6, 0xea, 0x91, 0xad, 0x04, 0x8e, 0x6d, 0x7d, 0x03, 0x10, 0x50, 0x46,
	0xb9, 0x31, 0x0f, 0xd3, 0x6f, 0xdc, 0x9e, 0xb9, 0xf4, 0xfd, 0x6c, 0x91, 0x90, 0xd7, 0xb7, 0x5c,
	0x4e, 0x6a, 0x41, 0xb2, 0xbc, 0x3e, 0xd8, 0x77, 0x6e, 0x0c, 0x36, 0x7e, 0x04, 0xf5, 0xf1, 0x82,
	0x71, 0xcf, 0x31, 0xc4, 0xcf, 0x00, 0x53, 0x64, 0x61, 0x62, 0x2d, 0x02, 0x8f, 0x05, 0xd6, 0x9e,
	0x40, 0x2d, 0xdd, 0x1a, 0x6f, 0xc0, 0x3d, 0x12, 0xbe, 0x5e, 0xa3, 0x3f, 0x18, 0xea, 0x37, 0x46,
	0x10, 0x43, 0x23, 0xd7, 0x3b, 0xd1, 0x46, 0x32, 0xc2, 0xeb, 0x50, 0xcf, 0x61, 0xc3, 0x03, 0xb9,
	0x18, 0x4e, 0x49, 0x0e, 0x8a, 0xe6, 0x51, 0xea, 0x55, 0x60, 0x45, 0x04, 0xd1, 0x5b, 0x03, 0xc8,
	0xde, 0x52, 0xfb, 0x29, 0x40, 0x16, 0x7a, 0xf8, 0x9c, 0xbd, 0xe9, 0x94, 0xd1, 0x68, 0x3e, 0xd6,
	0x49, 0x5c, 0x85, 0xb8, 0x4d, 0xdd, 0x19, 0x9f, 0x8b, 0xb1, 0xa8, 0x93, 0xb8, 0xea, 0x7d, 0x7b,
	0x71, 0xa9, 0x16, 0xde, 0x5c, 0xaa, 0x85, 0xf7, 0x97, 0x2a, 0xfa, 0x79, 0xa9, 0xa2, 0x5f, 0x97,
	0x2a, 0x7a, 0xbd, 0x54, 0xd1, 0xc5, 0x52, 0x45, 0x7f, 0x2d, 0x55, 0xf4, 0x6e, 0xa9, 0x16, 0xde,
	0x2f, 0x55, 0xf4, 0xcb, 0x95, 0x5a, 0xb8, 0xb8, 0x52, 0x0b, 0x6f, 0xae, 0xd4, 0xc2, 0x0f, 0xb9,
	0xff, 0x1e, 0xa7, 0x65, 0xf1, 0x67, 0xe3, 0xf1, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x03, 0x9f,
	0x7c, 0x56, 0xa0, 0x08, 0x00, 0x00,
}
+
+func (x WriteRequest_SourceEnum) String() string {
+	s, ok := WriteRequest_SourceEnum_name[int32(x)]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(x))
+}
+func (x Metadata_MetricType) String() string {
+	s, ok := Metadata_MetricType_name[int32(x)]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(x))
+}
+func (x Histogram_ResetHint) String() string {
+	s, ok := Histogram_ResetHint_name[int32(x)]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(x))
+}
// Equal reports field-for-field equality between this and that. that may be
// a *WriteRequest or a WriteRequest value; any other type yields false.
func (this *WriteRequest) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*WriteRequest)
	if !ok {
		that2, ok := that.(WriteRequest)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Source != that1.Source {
		return false
	}
	if len(this.Symbols) != len(that1.Symbols) {
		return false
	}
	for i := range this.Symbols {
		if this.Symbols[i] != that1.Symbols[i] {
			return false
		}
	}
	if len(this.Timeseries) != len(that1.Timeseries) {
		return false
	}
	for i := range this.Timeseries {
		if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
			return false
		}
	}
	if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *WriteResponse or a WriteResponse value; any other type yields false.
func (this *WriteResponse) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*WriteResponse)
	if !ok {
		that2, ok := that.(WriteResponse)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Samples != that1.Samples {
		return false
	}
	if this.Histograms != that1.Histograms {
		return false
	}
	if this.Exemplars != that1.Exemplars {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *TimeSeries or a TimeSeries value; repeated message fields are compared
// element-wise via their own Equal methods.
func (this *TimeSeries) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*TimeSeries)
	if !ok {
		that2, ok := that.(TimeSeries)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
		return false
	}
	for i := range this.LabelsRefs {
		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
			return false
		}
	}
	if len(this.Samples) != len(that1.Samples) {
		return false
	}
	for i := range this.Samples {
		if !this.Samples[i].Equal(&that1.Samples[i]) {
			return false
		}
	}
	if len(this.Histograms) != len(that1.Histograms) {
		return false
	}
	for i := range this.Histograms {
		if !this.Histograms[i].Equal(&that1.Histograms[i]) {
			return false
		}
	}
	if len(this.Exemplars) != len(that1.Exemplars) {
		return false
	}
	for i := range this.Exemplars {
		if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
			return false
		}
	}
	if !this.Metadata.Equal(&that1.Metadata) {
		return false
	}
	if this.CreatedTimestamp != that1.CreatedTimestamp {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// an *Exemplar or an Exemplar value; any other type yields false.
func (this *Exemplar) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Exemplar)
	if !ok {
		that2, ok := that.(Exemplar)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
		return false
	}
	for i := range this.LabelsRefs {
		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
			return false
		}
	}
	if this.Value != that1.Value {
		return false
	}
	if this.Timestamp != that1.Timestamp {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *Sample or a Sample value; any other type yields false.
func (this *Sample) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Sample)
	if !ok {
		that2, ok := that.(Sample)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Value != that1.Value {
		return false
	}
	if this.Timestamp != that1.Timestamp {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *Metadata or a Metadata value; any other type yields false.
func (this *Metadata) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Metadata)
	if !ok {
		that2, ok := that.(Metadata)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Type != that1.Type {
		return false
	}
	if this.HelpRef != that1.HelpRef {
		return false
	}
	if this.UnitRef != that1.UnitRef {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *Histogram or a Histogram value. The Count and ZeroCount oneof wrappers
// are compared via their own Equal methods after matching nil-ness.
func (this *Histogram) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Histogram)
	if !ok {
		that2, ok := that.(Histogram)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	// oneof count: both nil, or both set and wrapper-equal.
	if that1.Count == nil {
		if this.Count != nil {
			return false
		}
	} else if this.Count == nil {
		return false
	} else if !this.Count.Equal(that1.Count) {
		return false
	}
	if this.Sum != that1.Sum {
		return false
	}
	if this.Schema != that1.Schema {
		return false
	}
	if this.ZeroThreshold != that1.ZeroThreshold {
		return false
	}
	// oneof zero_count: same nil-matching pattern as count.
	if that1.ZeroCount == nil {
		if this.ZeroCount != nil {
			return false
		}
	} else if this.ZeroCount == nil {
		return false
	} else if !this.ZeroCount.Equal(that1.ZeroCount) {
		return false
	}
	if len(this.NegativeSpans) != len(that1.NegativeSpans) {
		return false
	}
	for i := range this.NegativeSpans {
		if !this.NegativeSpans[i].Equal(&that1.NegativeSpans[i]) {
			return false
		}
	}
	if len(this.NegativeDeltas) != len(that1.NegativeDeltas) {
		return false
	}
	for i := range this.NegativeDeltas {
		if this.NegativeDeltas[i] != that1.NegativeDeltas[i] {
			return false
		}
	}
	if len(this.NegativeCounts) != len(that1.NegativeCounts) {
		return false
	}
	for i := range this.NegativeCounts {
		if this.NegativeCounts[i] != that1.NegativeCounts[i] {
			return false
		}
	}
	if len(this.PositiveSpans) != len(that1.PositiveSpans) {
		return false
	}
	for i := range this.PositiveSpans {
		if !this.PositiveSpans[i].Equal(&that1.PositiveSpans[i]) {
			return false
		}
	}
	if len(this.PositiveDeltas) != len(that1.PositiveDeltas) {
		return false
	}
	for i := range this.PositiveDeltas {
		if this.PositiveDeltas[i] != that1.PositiveDeltas[i] {
			return false
		}
	}
	if len(this.PositiveCounts) != len(that1.PositiveCounts) {
		return false
	}
	for i := range this.PositiveCounts {
		if this.PositiveCounts[i] != that1.PositiveCounts[i] {
			return false
		}
	}
	if this.ResetHint != that1.ResetHint {
		return false
	}
	if this.Timestamp != that1.Timestamp {
		return false
	}
	if len(this.CustomValues) != len(that1.CustomValues) {
		return false
	}
	for i := range this.CustomValues {
		if this.CustomValues[i] != that1.CustomValues[i] {
			return false
		}
	}
	return true
}
// Equal reports equality of this oneof wrapper with that, which may be a
// *Histogram_CountInt or a Histogram_CountInt value.
func (this *Histogram_CountInt) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Histogram_CountInt)
	if !ok {
		that2, ok := that.(Histogram_CountInt)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.CountInt != that1.CountInt {
		return false
	}
	return true
}
// Equal reports equality of this oneof wrapper with that, which may be a
// *Histogram_CountFloat or a Histogram_CountFloat value.
func (this *Histogram_CountFloat) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Histogram_CountFloat)
	if !ok {
		that2, ok := that.(Histogram_CountFloat)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.CountFloat != that1.CountFloat {
		return false
	}
	return true
}
// Equal reports equality of this oneof wrapper with that, which may be a
// *Histogram_ZeroCountInt or a Histogram_ZeroCountInt value.
func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Histogram_ZeroCountInt)
	if !ok {
		that2, ok := that.(Histogram_ZeroCountInt)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.ZeroCountInt != that1.ZeroCountInt {
		return false
	}
	return true
}
// Equal reports equality of this oneof wrapper with that, which may be a
// *Histogram_ZeroCountFloat or a Histogram_ZeroCountFloat value.
func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Histogram_ZeroCountFloat)
	if !ok {
		that2, ok := that.(Histogram_ZeroCountFloat)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.ZeroCountFloat != that1.ZeroCountFloat {
		return false
	}
	return true
}
// Equal reports field-for-field equality between this and that. that may be
// a *BucketSpan or a BucketSpan value; any other type yields false.
func (this *BucketSpan) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*BucketSpan)
	if !ok {
		that2, ok := that.(BucketSpan)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Offset != that1.Offset {
		return false
	}
	if this.Length != that1.Length {
		return false
	}
	return true
}
// GoString returns a Go-syntax literal for this WriteRequest (gogoproto
// gostring plugin output, used for debugging).
func (this *WriteRequest) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 8)
	s = append(s, "&cortexpbv2.WriteRequest{")
	s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
	s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n")
	s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
	s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this WriteResponse (gogoproto
// gostring plugin output, used for debugging).
func (this *WriteResponse) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&cortexpbv2.WriteResponse{")
	s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n")
	s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n")
	s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this TimeSeries. Non-nullable
// repeated fields are rendered through pointer slices so each element prints
// as a &Type{...} literal.
func (this *TimeSeries) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 10)
	s = append(s, "&cortexpbv2.TimeSeries{")
	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
	if this.Samples != nil {
		vs := make([]*Sample, len(this.Samples))
		for i := range vs {
			vs[i] = &this.Samples[i]
		}
		s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	if this.Histograms != nil {
		vs := make([]*Histogram, len(this.Histograms))
		for i := range vs {
			vs[i] = &this.Histograms[i]
		}
		s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	if this.Exemplars != nil {
		vs := make([]*Exemplar, len(this.Exemplars))
		for i := range vs {
			vs[i] = &this.Exemplars[i]
		}
		s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n")
	s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this Exemplar (gogoproto
// gostring plugin output, used for debugging).
func (this *Exemplar) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&cortexpbv2.Exemplar{")
	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this Sample (gogoproto
// gostring plugin output, used for debugging).
func (this *Sample) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&cortexpbv2.Sample{")
	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this Metadata (gogoproto
// gostring plugin output, used for debugging).
func (this *Metadata) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 7)
	s = append(s, "&cortexpbv2.Metadata{")
	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
	s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n")
	s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this Histogram. Oneof wrappers
// (Count, ZeroCount) are emitted only when set; span slices are rendered as
// pointer slices so each element prints as a &BucketSpan{...} literal.
func (this *Histogram) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 20)
	s = append(s, "&cortexpbv2.Histogram{")
	if this.Count != nil {
		s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n")
	}
	s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n")
	s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n")
	s = append(s, "ZeroThreshold: "+fmt.Sprintf("%#v", this.ZeroThreshold)+",\n")
	if this.ZeroCount != nil {
		s = append(s, "ZeroCount: "+fmt.Sprintf("%#v", this.ZeroCount)+",\n")
	}
	if this.NegativeSpans != nil {
		vs := make([]*BucketSpan, len(this.NegativeSpans))
		for i := range vs {
			vs[i] = &this.NegativeSpans[i]
		}
		s = append(s, "NegativeSpans: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	s = append(s, "NegativeDeltas: "+fmt.Sprintf("%#v", this.NegativeDeltas)+",\n")
	s = append(s, "NegativeCounts: "+fmt.Sprintf("%#v", this.NegativeCounts)+",\n")
	if this.PositiveSpans != nil {
		vs := make([]*BucketSpan, len(this.PositiveSpans))
		for i := range vs {
			vs[i] = &this.PositiveSpans[i]
		}
		s = append(s, "PositiveSpans: "+fmt.Sprintf("%#v", vs)+",\n")
	}
	s = append(s, "PositiveDeltas: "+fmt.Sprintf("%#v", this.PositiveDeltas)+",\n")
	s = append(s, "PositiveCounts: "+fmt.Sprintf("%#v", this.PositiveCounts)+",\n")
	s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n")
	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
	s = append(s, "CustomValues: "+fmt.Sprintf("%#v", this.CustomValues)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// GoString returns a Go-syntax literal for this oneof wrapper.
func (this *Histogram_CountInt) GoString() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&cortexpbv2.Histogram_CountInt{` +
		`CountInt:` + fmt.Sprintf("%#v", this.CountInt) + `}`}, ", ")
	return s
}
// GoString returns a Go-syntax literal for this oneof wrapper.
func (this *Histogram_CountFloat) GoString() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&cortexpbv2.Histogram_CountFloat{` +
		`CountFloat:` + fmt.Sprintf("%#v", this.CountFloat) + `}`}, ", ")
	return s
}
// GoString returns a Go-syntax literal for this oneof wrapper.
func (this *Histogram_ZeroCountInt) GoString() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountInt{` +
		`ZeroCountInt:` + fmt.Sprintf("%#v", this.ZeroCountInt) + `}`}, ", ")
	return s
}
// GoString returns a Go-syntax literal for this oneof wrapper.
func (this *Histogram_ZeroCountFloat) GoString() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountFloat{` +
		`ZeroCountFloat:` + fmt.Sprintf("%#v", this.ZeroCountFloat) + `}`}, ", ")
	return s
}
// GoString returns a Go-syntax literal for this BucketSpan (gogoproto
// gostring plugin output, used for debugging).
func (this *BucketSpan) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&cortexpbv2.BucketSpan{")
	s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n")
	s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n")
	s = append(s, "}")
	return strings.Join(s, "")
}
// valueToGoStringCortexv2 renders a pointer-valued field v as a Go expression
// that reconstructs it, or "nil" when the pointer is nil.
// NOTE(review): rv.IsNil panics for non-nil-able kinds — presumably only
// invoked by generated code with pointer fields; confirm against callers.
func valueToGoStringCortexv2(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
+func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length. Fields are written last-field-first so length prefixes
// can be emitted after their payloads in a single pass.
func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.SkipLabelNameValidation {
		i--
		if m.SkipLabelNameValidation {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		// Two-byte varint tag 0xc0 0x3e = field 1000, wire type 0 (varint).
		i--
		dAtA[i] = 0x3e
		i--
		dAtA[i] = 0xc0
	}
	if len(m.Timeseries) > 0 {
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size := m.Timeseries[iNdEx].Size()
				i -= size
				if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
					return 0, err
				}
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x2a = field 5 (timeseries), wire type 2 (length-delimited).
			i--
			dAtA[i] = 0x2a
		}
	}
	if len(m.Symbols) > 0 {
		for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Symbols[iNdEx])
			copy(dAtA[i:], m.Symbols[iNdEx])
			i = encodeVarintCortexv2(dAtA, i, uint64(len(m.Symbols[iNdEx])))
			// Tag 0x22 = field 4 (symbols), wire type 2.
			i--
			dAtA[i] = 0x22
		}
	}
	if m.Source != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Source))
		// Tag 0x18 = field 3 (source), wire type 0.
		i--
		dAtA[i] = 0x18
	}
	return len(dAtA) - i, nil
}
+
+func (m *WriteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length. Zero-valued counters are omitted (proto3 default).
func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Exemplars != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Exemplars))
		// Tag 0x18 = field 3 (exemplars), wire type 0 (varint).
		i--
		dAtA[i] = 0x18
	}
	if m.Histograms != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Histograms))
		// Tag 0x10 = field 2 (histograms), wire type 0.
		i--
		dAtA[i] = 0x10
	}
	if m.Samples != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Samples))
		// Tag 0x8 = field 1 (samples), wire type 0.
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}
+
+func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length. Metadata is non-nullable and therefore always written;
// LabelsRefs is emitted as a single packed varint field.
func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.CreatedTimestamp != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.CreatedTimestamp))
		// Tag 0x30 = field 6 (created_timestamp), wire type 0 (varint).
		i--
		dAtA[i] = 0x30
	}
	{
		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintCortexv2(dAtA, i, uint64(size))
	}
	// Tag 0x2a = field 5 (metadata), wire type 2 (length-delimited).
	i--
	dAtA[i] = 0x2a
	if len(m.Exemplars) > 0 {
		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x22 = field 4 (exemplars), wire type 2.
			i--
			dAtA[i] = 0x22
		}
	}
	if len(m.Histograms) > 0 {
		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x1a = field 3 (histograms), wire type 2.
			i--
			dAtA[i] = 0x1a
		}
	}
	if len(m.Samples) > 0 {
		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x12 = field 2 (samples), wire type 2.
			i--
			dAtA[i] = 0x12
		}
	}
	if len(m.LabelsRefs) > 0 {
		// Packed varints: encode each ref into a scratch buffer (max 10 bytes
		// per varint), then copy the payload in front of its length prefix.
		dAtA3 := make([]byte, len(m.LabelsRefs)*10)
		var j2 int
		for _, num := range m.LabelsRefs {
			for num >= 1<<7 {
				dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				j2++
			}
			dAtA3[j2] = uint8(num)
			j2++
		}
		i -= j2
		copy(dAtA[i:], dAtA3[:j2])
		i = encodeVarintCortexv2(dAtA, i, uint64(j2))
		// Tag 0xa = field 1 (labels_refs), wire type 2 (packed).
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
+
+func (m *Exemplar) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length.
func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Timestamp != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
		// Tag 0x18 = field 3 (timestamp), wire type 0 (varint).
		i--
		dAtA[i] = 0x18
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		// Tag 0x11 = field 2 (value), wire type 1 (fixed64).
		i--
		dAtA[i] = 0x11
	}
	if len(m.LabelsRefs) > 0 {
		// Packed varints into a scratch buffer, copied before the length prefix.
		dAtA5 := make([]byte, len(m.LabelsRefs)*10)
		var j4 int
		for _, num := range m.LabelsRefs {
			for num >= 1<<7 {
				dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				j4++
			}
			dAtA5[j4] = uint8(num)
			j4++
		}
		i -= j4
		copy(dAtA[i:], dAtA5[:j4])
		i = encodeVarintCortexv2(dAtA, i, uint64(j4))
		// Tag 0xa = field 1 (labels_refs), wire type 2 (packed).
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
+
+func (m *Sample) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length.
func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Timestamp != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
		// Tag 0x10 = field 2 (timestamp), wire type 0 (varint).
		i--
		dAtA[i] = 0x10
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		// Tag 0x9 = field 1 (value), wire type 1 (fixed64).
		i--
		dAtA[i] = 0x9
	}
	return len(dAtA) - i, nil
}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length.
func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.UnitRef != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.UnitRef))
		// Tag 0x20 = field 4 (unit_ref), wire type 0 (varint).
		i--
		dAtA[i] = 0x20
	}
	if m.HelpRef != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.HelpRef))
		// Tag 0x18 = field 3 (help_ref), wire type 0.
		i--
		dAtA[i] = 0x18
	}
	if m.Type != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Type))
		// Tag 0x8 = field 1 (type), wire type 0.
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}
+
+func (m *Histogram) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
// MarshalToSizedBuffer encodes m backwards into the tail of dAtA and returns
// the encoded length. Float slices are written as packed fixed64 payloads,
// delta slices as packed zigzag varints, and the Count/ZeroCount oneof
// wrappers emit their own field tags via MarshalTo.
func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.CustomValues) > 0 {
		for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- {
			f6 := math.Float64bits(float64(m.CustomValues[iNdEx]))
			i -= 8
			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
		}
		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.CustomValues)*8))
		// Two-byte varint tag 0x82 0x01 = field 16 (custom_values), wire type 2.
		i--
		dAtA[i] = 0x1
		i--
		dAtA[i] = 0x82
	}
	if m.Timestamp != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
		// Tag 0x78 = field 15 (timestamp), wire type 0 (varint).
		i--
		dAtA[i] = 0x78
	}
	if m.ResetHint != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.ResetHint))
		// Tag 0x70 = field 14 (reset_hint), wire type 0.
		i--
		dAtA[i] = 0x70
	}
	if len(m.PositiveCounts) > 0 {
		for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
			f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
			i -= 8
			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7))
		}
		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.PositiveCounts)*8))
		// Tag 0x6a = field 13 (positive_counts), wire type 2 (packed fixed64).
		i--
		dAtA[i] = 0x6a
	}
	if len(m.PositiveDeltas) > 0 {
		// Packed zigzag-encoded varints in a scratch buffer, copied before
		// the length prefix.
		var j8 int
		dAtA10 := make([]byte, len(m.PositiveDeltas)*10)
		for _, num := range m.PositiveDeltas {
			x9 := (uint64(num) << 1) ^ uint64((num >> 63))
			for x9 >= 1<<7 {
				dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80)
				j8++
				x9 >>= 7
			}
			dAtA10[j8] = uint8(x9)
			j8++
		}
		i -= j8
		copy(dAtA[i:], dAtA10[:j8])
		i = encodeVarintCortexv2(dAtA, i, uint64(j8))
		// Tag 0x62 = field 12 (positive_deltas), wire type 2.
		i--
		dAtA[i] = 0x62
	}
	if len(m.PositiveSpans) > 0 {
		for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x5a = field 11 (positive_spans), wire type 2.
			i--
			dAtA[i] = 0x5a
		}
	}
	if len(m.NegativeCounts) > 0 {
		for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
			f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
			i -= 8
			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11))
		}
		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.NegativeCounts)*8))
		// Tag 0x52 = field 10 (negative_counts), wire type 2 (packed fixed64).
		i--
		dAtA[i] = 0x52
	}
	if len(m.NegativeDeltas) > 0 {
		var j12 int
		dAtA14 := make([]byte, len(m.NegativeDeltas)*10)
		for _, num := range m.NegativeDeltas {
			x13 := (uint64(num) << 1) ^ uint64((num >> 63))
			for x13 >= 1<<7 {
				dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80)
				j12++
				x13 >>= 7
			}
			dAtA14[j12] = uint8(x13)
			j12++
		}
		i -= j12
		copy(dAtA[i:], dAtA14[:j12])
		i = encodeVarintCortexv2(dAtA, i, uint64(j12))
		// Tag 0x4a = field 9 (negative_deltas), wire type 2.
		i--
		dAtA[i] = 0x4a
	}
	if len(m.NegativeSpans) > 0 {
		for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintCortexv2(dAtA, i, uint64(size))
			}
			// Tag 0x42 = field 8 (negative_spans), wire type 2.
			i--
			dAtA[i] = 0x42
		}
	}
	if m.ZeroCount != nil {
		// Oneof wrapper writes its own field tag (field 6 or 7).
		{
			size := m.ZeroCount.Size()
			i -= size
			if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil {
				return 0, err
			}
		}
	}
	if m.ZeroThreshold != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
		// Tag 0x29 = field 5 (zero_threshold), wire type 1 (fixed64).
		i--
		dAtA[i] = 0x29
	}
	if m.Schema != 0 {
		// Zigzag-encode the signed schema (sint32 on the wire).
		i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
		// Tag 0x20 = field 4 (schema), wire type 0.
		i--
		dAtA[i] = 0x20
	}
	if m.Sum != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
		// Tag 0x19 = field 3 (sum), wire type 1 (fixed64).
		i--
		dAtA[i] = 0x19
	}
	if m.Count != nil {
		// Oneof wrapper writes its own field tag (field 1 or 2).
		{
			size := m.Count.Size()
			i -= size
			if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil {
				return 0, err
			}
		}
	}
	return len(dAtA) - i, nil
}
+
// MarshalTo encodes the Count oneof wrapper (integer case) into dAtA and
// returns the number of bytes written.
func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}

// MarshalToSizedBuffer writes CountInt backwards from the end of dAtA:
// varint value first, then tag byte 0x8 (field 1, wire type 0 / varint).
func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i = encodeVarintCortexv2(dAtA, i, uint64(m.CountInt))
	i--
	dAtA[i] = 0x8
	return len(dAtA) - i, nil
}
// MarshalTo encodes the Count oneof wrapper (float case) into dAtA.
func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}

// MarshalToSizedBuffer writes CountFloat backwards: 8-byte little-endian
// IEEE-754 value, then tag byte 0x11 (field 2, wire type 1 / fixed64).
func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= 8
	encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat))))
	i--
	dAtA[i] = 0x11
	return len(dAtA) - i, nil
}
// MarshalTo encodes the ZeroCount oneof wrapper (integer case) into dAtA.
func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}

// MarshalToSizedBuffer writes ZeroCountInt backwards: varint value, then
// tag byte 0x30 (field 6, wire type 0 / varint).
func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i = encodeVarintCortexv2(dAtA, i, uint64(m.ZeroCountInt))
	i--
	dAtA[i] = 0x30
	return len(dAtA) - i, nil
}
// MarshalTo encodes the ZeroCount oneof wrapper (float case) into dAtA.
func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}

// MarshalToSizedBuffer writes ZeroCountFloat backwards: 8-byte little-endian
// IEEE-754 value, then tag byte 0x39 (field 7, wire type 1 / fixed64).
func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= 8
	encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
	i--
	dAtA[i] = 0x39
	return len(dAtA) - i, nil
}
// Marshal encodes the BucketSpan into a freshly allocated, exactly sized
// buffer.
func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes the BucketSpan into dAtA, which must hold at least
// Size() bytes.
func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes the BucketSpan backwards from the end of
// dAtA, highest field number first: Length (field 2, varint, tag 0x10),
// then Offset (field 1, zigzag-encoded sint32, tag 0x8). Zero-valued
// fields are omitted, per proto3 semantics.
func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Length != 0 {
		i = encodeVarintCortexv2(dAtA, i, uint64(m.Length))
		i--
		dAtA[i] = 0x10
	}
	if m.Offset != 0 {
		// Zigzag-encode the signed offset so small negative values stay small.
		i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}
+
+func encodeVarintCortexv2(dAtA []byte, offset int, v uint64) int {
+	offset -= sovCortexv2(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
// Size returns the number of bytes the protobuf encoding of m occupies.
// A nil receiver encodes to zero bytes; zero-valued fields are omitted.
func (m *WriteRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Source != 0 {
		n += 1 + sovCortexv2(uint64(m.Source))
	}
	if len(m.Symbols) > 0 {
		for _, s := range m.Symbols {
			l = len(s)
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if m.SkipLabelNameValidation {
		// Field 1000 needs a two-byte tag plus one byte for the bool value.
		n += 3
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// Each non-zero counter costs one tag byte plus its varint value.
func (m *WriteResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Samples != 0 {
		n += 1 + sovCortexv2(uint64(m.Samples))
	}
	if m.Histograms != 0 {
		n += 1 + sovCortexv2(uint64(m.Histograms))
	}
	if m.Exemplars != 0 {
		n += 1 + sovCortexv2(uint64(m.Exemplars))
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// LabelsRefs is sized as a packed varint field; Metadata is a non-pointer
// embedded message and is therefore always counted, even when empty.
func (m *TimeSeries) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.LabelsRefs) > 0 {
		l = 0
		for _, e := range m.LabelsRefs {
			l += sovCortexv2(uint64(e))
		}
		n += 1 + sovCortexv2(uint64(l)) + l
	}
	if len(m.Samples) > 0 {
		for _, e := range m.Samples {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if len(m.Histograms) > 0 {
		for _, e := range m.Histograms {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if len(m.Exemplars) > 0 {
		for _, e := range m.Exemplars {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	l = m.Metadata.Size()
	n += 1 + l + sovCortexv2(uint64(l))
	if m.CreatedTimestamp != 0 {
		n += 1 + sovCortexv2(uint64(m.CreatedTimestamp))
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// Value is a double: fixed 9 bytes (1 tag + 8 payload) when non-zero.
func (m *Exemplar) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.LabelsRefs) > 0 {
		l = 0
		for _, e := range m.LabelsRefs {
			l += sovCortexv2(uint64(e))
		}
		n += 1 + sovCortexv2(uint64(l)) + l
	}
	if m.Value != 0 {
		n += 9
	}
	if m.Timestamp != 0 {
		n += 1 + sovCortexv2(uint64(m.Timestamp))
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// Value is a double (9 bytes: tag + fixed64); Timestamp is a varint.
func (m *Sample) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 9
	}
	if m.Timestamp != 0 {
		n += 1 + sovCortexv2(uint64(m.Timestamp))
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// HelpRef and UnitRef are indexes into the request's symbol table.
func (m *Metadata) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Type != 0 {
		n += 1 + sovCortexv2(uint64(m.Type))
	}
	if m.HelpRef != 0 {
		n += 1 + sovCortexv2(uint64(m.HelpRef))
	}
	if m.UnitRef != 0 {
		n += 1 + sovCortexv2(uint64(m.UnitRef))
	}
	return n
}
+
// Size returns the number of bytes the protobuf encoding of m occupies.
// Oneof wrappers (Count, ZeroCount) report their own size; doubles cost a
// fixed 9 bytes (tag + fixed64); sint fields use zigzag (soz) sizing.
func (m *Histogram) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Count != nil {
		n += m.Count.Size()
	}
	if m.Sum != 0 {
		n += 9
	}
	if m.Schema != 0 {
		n += 1 + sozCortexv2(uint64(m.Schema))
	}
	if m.ZeroThreshold != 0 {
		n += 9
	}
	if m.ZeroCount != nil {
		n += m.ZeroCount.Size()
	}
	if len(m.NegativeSpans) > 0 {
		for _, e := range m.NegativeSpans {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if len(m.NegativeDeltas) > 0 {
		l = 0
		for _, e := range m.NegativeDeltas {
			l += sozCortexv2(uint64(e))
		}
		n += 1 + sovCortexv2(uint64(l)) + l
	}
	if len(m.NegativeCounts) > 0 {
		// Packed fixed64 doubles: 8 bytes per element plus tag and length prefix.
		n += 1 + sovCortexv2(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8
	}
	if len(m.PositiveSpans) > 0 {
		for _, e := range m.PositiveSpans {
			l = e.Size()
			n += 1 + l + sovCortexv2(uint64(l))
		}
	}
	if len(m.PositiveDeltas) > 0 {
		l = 0
		for _, e := range m.PositiveDeltas {
			l += sozCortexv2(uint64(e))
		}
		n += 1 + sovCortexv2(uint64(l)) + l
	}
	if len(m.PositiveCounts) > 0 {
		n += 1 + sovCortexv2(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8
	}
	if m.ResetHint != 0 {
		n += 1 + sovCortexv2(uint64(m.ResetHint))
	}
	if m.Timestamp != 0 {
		n += 1 + sovCortexv2(uint64(m.Timestamp))
	}
	if len(m.CustomValues) > 0 {
		// Two-byte tag here (field number >= 16), hence 2 + ... below.
		n += 2 + sovCortexv2(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8
	}
	return n
}
+
// Size returns the encoded size of the Count oneof (integer case): one
// tag byte plus the varint value.
func (m *Histogram_CountInt) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	n += 1 + sovCortexv2(uint64(m.CountInt))
	return n
}
// Size returns the encoded size of the Count oneof (float case): one tag
// byte plus a fixed 8-byte double.
func (m *Histogram_CountFloat) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	n += 9
	return n
}
// Size returns the encoded size of the ZeroCount oneof (integer case).
func (m *Histogram_ZeroCountInt) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	n += 1 + sovCortexv2(uint64(m.ZeroCountInt))
	return n
}
// Size returns the encoded size of the ZeroCount oneof (float case): one
// tag byte plus a fixed 8-byte double.
func (m *Histogram_ZeroCountFloat) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	n += 9
	return n
}
// Size returns the number of bytes the protobuf encoding of m occupies.
// Offset is a zigzag-encoded sint32 (soz); Length is a plain varint.
func (m *BucketSpan) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Offset != 0 {
		n += 1 + sozCortexv2(uint64(m.Offset))
	}
	if m.Length != 0 {
		n += 1 + sovCortexv2(uint64(m.Length))
	}
	return n
}
+
// sovCortexv2 returns the number of bytes needed to encode x as a protobuf
// varint: one byte per 7 payload bits, with a minimum of one byte for zero.
func sovCortexv2(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sozCortexv2 returns the varint-encoded size of x after zigzag encoding,
// as used by sint32/sint64 fields (small-magnitude negatives stay small).
func sozCortexv2(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovCortexv2(zigzag)
}
// String implements fmt.Stringer, rendering the request in the
// gogoproto-generated debug format.
func (this *WriteRequest) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&WriteRequest{`,
		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
		`Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`,
		`Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
		`SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer, rendering the response in the
// gogoproto-generated debug format.
func (this *WriteResponse) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&WriteResponse{`,
		`Samples:` + fmt.Sprintf("%v", this.Samples) + `,`,
		`Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`,
		`Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer. Repeated message fields are pre-rendered
// element by element; the strings.Replace(x, "Sample", "Sample", 1) calls
// are no-ops emitted by the generator and are kept as generated.
func (this *TimeSeries) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForSamples := "[]Sample{"
	for _, f := range this.Samples {
		repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
	}
	repeatedStringForSamples += "}"
	repeatedStringForHistograms := "[]Histogram{"
	for _, f := range this.Histograms {
		repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
	}
	repeatedStringForHistograms += "}"
	repeatedStringForExemplars := "[]Exemplar{"
	for _, f := range this.Exemplars {
		repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "Exemplar", "Exemplar", 1), `&`, ``, 1) + ","
	}
	repeatedStringForExemplars += "}"
	s := strings.Join([]string{`&TimeSeries{`,
		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
		`Samples:` + repeatedStringForSamples + `,`,
		`Histograms:` + repeatedStringForHistograms + `,`,
		`Exemplars:` + repeatedStringForExemplars + `,`,
		`Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`,
		`CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer, rendering the exemplar in the
// gogoproto-generated debug format.
func (this *Exemplar) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Exemplar{`,
		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer, rendering the sample in the
// gogoproto-generated debug format.
func (this *Sample) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Sample{`,
		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer, rendering the metadata in the
// gogoproto-generated debug format.
func (this *Metadata) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Metadata{`,
		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
		`HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`,
		`UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer. The repeated BucketSpan fields are
// pre-rendered element by element; the identity strings.Replace calls are
// no-ops emitted by the generator and are kept as generated.
func (this *Histogram) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForNegativeSpans := "[]BucketSpan{"
	for _, f := range this.NegativeSpans {
		repeatedStringForNegativeSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
	}
	repeatedStringForNegativeSpans += "}"
	repeatedStringForPositiveSpans := "[]BucketSpan{"
	for _, f := range this.PositiveSpans {
		repeatedStringForPositiveSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
	}
	repeatedStringForPositiveSpans += "}"
	s := strings.Join([]string{`&Histogram{`,
		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
		`Sum:` + fmt.Sprintf("%v", this.Sum) + `,`,
		`Schema:` + fmt.Sprintf("%v", this.Schema) + `,`,
		`ZeroThreshold:` + fmt.Sprintf("%v", this.ZeroThreshold) + `,`,
		`ZeroCount:` + fmt.Sprintf("%v", this.ZeroCount) + `,`,
		`NegativeSpans:` + repeatedStringForNegativeSpans + `,`,
		`NegativeDeltas:` + fmt.Sprintf("%v", this.NegativeDeltas) + `,`,
		`NegativeCounts:` + fmt.Sprintf("%v", this.NegativeCounts) + `,`,
		`PositiveSpans:` + repeatedStringForPositiveSpans + `,`,
		`PositiveDeltas:` + fmt.Sprintf("%v", this.PositiveDeltas) + `,`,
		`PositiveCounts:` + fmt.Sprintf("%v", this.PositiveCounts) + `,`,
		`ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`,
		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
		`CustomValues:` + fmt.Sprintf("%v", this.CustomValues) + `,`,
		`}`,
	}, "")
	return s
}
// String renders the Count oneof wrapper (integer case) for debugging.
func (this *Histogram_CountInt) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Histogram_CountInt{`,
		`CountInt:` + fmt.Sprintf("%v", this.CountInt) + `,`,
		`}`,
	}, "")
	return s
}
// String renders the Count oneof wrapper (float case) for debugging.
func (this *Histogram_CountFloat) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Histogram_CountFloat{`,
		`CountFloat:` + fmt.Sprintf("%v", this.CountFloat) + `,`,
		`}`,
	}, "")
	return s
}
// String renders the ZeroCount oneof wrapper (integer case) for debugging.
func (this *Histogram_ZeroCountInt) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Histogram_ZeroCountInt{`,
		`ZeroCountInt:` + fmt.Sprintf("%v", this.ZeroCountInt) + `,`,
		`}`,
	}, "")
	return s
}
// String renders the ZeroCount oneof wrapper (float case) for debugging.
func (this *Histogram_ZeroCountFloat) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Histogram_ZeroCountFloat{`,
		`ZeroCountFloat:` + fmt.Sprintf("%v", this.ZeroCountFloat) + `,`,
		`}`,
	}, "")
	return s
}
// String implements fmt.Stringer, rendering the span in the
// gogoproto-generated debug format.
func (this *BucketSpan) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&BucketSpan{`,
		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
		`Length:` + fmt.Sprintf("%v", this.Length) + `,`,
		`}`,
	}, "")
	return s
}
// valueToStringCortexv2 renders a pointer value for debug output: "nil"
// for a nil pointer, otherwise "*" followed by the %v rendering of the
// pointed-to value.
func valueToStringCortexv2(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(rv).Interface())
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Repeated
// fields (Symbols, Timeseries) are appended to rather than replaced, and
// unknown fields are skipped via skipCortexv2.
func (m *WriteRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (field number << 3 | wire type) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCortexv2
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 3:
			// Source (enum, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
			}
			m.Source = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Symbols (repeated string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 5:
			// Timeseries (repeated message): append an empty element and
			// delegate decoding of its bytes to the element's Unmarshal.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 1000:
			// SkipLabelNameValidation (bool, varint): any non-zero value is true.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SkipLabelNameValidation = bool(v != 0)
		default:
			// Unknown field: skip it, bounds-checking the skipped span.
			iNdEx = preIndex
			skippy, err := skipCortexv2(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. All three
// fields are int64 varint counters; unknown fields are skipped.
func (m *WriteResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (field number << 3 | wire type) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCortexv2
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
			}
			m.Samples = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Samples |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
			}
			m.Histograms = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Histograms |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
			}
			m.Exemplars = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Exemplars |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, bounds-checking the skipped span.
			iNdEx = preIndex
			skippy, err := skipCortexv2(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. LabelsRefs
// accepts both packed (length-delimited) and unpacked (one varint per
// element) encodings; repeated message fields are appended to; unknown
// fields are skipped.
func (m *TimeSeries) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (field number << 3 | wire type) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCortexv2
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType == 0 {
				// Unpacked form: a single varint element.
				var v uint32
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCortexv2
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= uint32(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.LabelsRefs = append(m.LabelsRefs, v)
			} else if wireType == 2 {
				// Packed form: a length-delimited run of varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCortexv2
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthCortexv2
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthCortexv2
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Count terminator bytes (high bit clear) to pre-size the slice.
				var elementCount int
				var count int
				for _, integer := range dAtA[iNdEx:postIndex] {
					if integer < 128 {
						count++
					}
				}
				elementCount = count
				if elementCount != 0 && len(m.LabelsRefs) == 0 {
					m.LabelsRefs = make([]uint32, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint32
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCortexv2
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= uint32(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.LabelsRefs = append(m.LabelsRefs, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
			}
		case 2:
			// Samples (repeated message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Samples = append(m.Samples, Sample{})
			if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Histograms (repeated message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Histograms = append(m.Histograms, Histogram{})
			if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Exemplars (repeated message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Exemplars = append(m.Exemplars, Exemplar{})
			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Metadata (embedded non-pointer message): decode in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthCortexv2
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthCortexv2
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// CreatedTimestamp (int64, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
			}
			m.CreatedTimestamp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CreatedTimestamp |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, bounds-checking the skipped span.
			iNdEx = preIndex
			skippy, err := skipCortexv2(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. LabelsRefs
// accepts both packed and unpacked encodings; Value is a little-endian
// fixed64 double; unknown fields are skipped.
func (m *Exemplar) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (field number << 3 | wire type) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCortexv2
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType == 0 {
				// Unpacked form: a single varint element.
				var v uint32
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCortexv2
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= uint32(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.LabelsRefs = append(m.LabelsRefs, v)
			} else if wireType == 2 {
				// Packed form: a length-delimited run of varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowCortexv2
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthCortexv2
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthCortexv2
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Count terminator bytes (high bit clear) to pre-size the slice.
				var elementCount int
				var count int
				for _, integer := range dAtA[iNdEx:postIndex] {
					if integer < 128 {
						count++
					}
				}
				elementCount = count
				if elementCount != 0 && len(m.LabelsRefs) == 0 {
					m.LabelsRefs = make([]uint32, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint32
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowCortexv2
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= uint32(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.LabelsRefs = append(m.LabelsRefs, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
			}
		case 2:
			// Value (double): 8 little-endian bytes reinterpreted as float64.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		case 3:
			// Timestamp (int64, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
			}
			m.Timestamp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Timestamp |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, bounds-checking the skipped span.
			iNdEx = preIndex
			skippy, err := skipCortexv2(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Value is a
// little-endian fixed64 double; Timestamp is a varint; unknown fields are
// skipped.
func (m *Sample) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field key (field number << 3 | wire type) as a varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCortexv2
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Sample: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Value (double): 8 little-endian bytes reinterpreted as float64.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		case 2:
			// Timestamp (int64, varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
			}
			m.Timestamp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCortexv2
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Timestamp |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, bounds-checking the skipped span.
			iNdEx = preIndex
			skippy, err := skipCortexv2(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCortexv2
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortexv2
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= Metadata_MetricType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType)
+			}
+			m.HelpRef = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HelpRef |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType)
+			}
+			m.UnitRef = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.UnitRef |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortexv2(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Histogram) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortexv2
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType)
+			}
+			var v uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Count = &Histogram_CountInt{v}
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))}
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Sum = float64(math.Float64frombits(v))
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+			m.Schema = v
+		case 5:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.ZeroThreshold = float64(math.Float64frombits(v))
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType)
+			}
+			var v uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ZeroCount = &Histogram_ZeroCountInt{v}
+		case 7:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))}
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NegativeSpans = append(m.NegativeSpans, BucketSpan{})
+			if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+				m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.NegativeDeltas) == 0 {
+					m.NegativeDeltas = make([]int64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowCortexv2
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+					m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType)
+			}
+		case 10:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				v2 := float64(math.Float64frombits(v))
+				m.NegativeCounts = append(m.NegativeCounts, v2)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.NegativeCounts) == 0 {
+					m.NegativeCounts = make([]float64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					v2 := float64(math.Float64frombits(v))
+					m.NegativeCounts = append(m.NegativeCounts, v2)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType)
+			}
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PositiveSpans = append(m.PositiveSpans, BucketSpan{})
+			if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+				m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.PositiveDeltas) == 0 {
+					m.PositiveDeltas = make([]int64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowCortexv2
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+					m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType)
+			}
+		case 13:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				v2 := float64(math.Float64frombits(v))
+				m.PositiveCounts = append(m.PositiveCounts, v2)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.PositiveCounts) == 0 {
+					m.PositiveCounts = make([]float64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					v2 := float64(math.Float64frombits(v))
+					m.PositiveCounts = append(m.PositiveCounts, v2)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType)
+			}
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType)
+			}
+			m.ResetHint = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 15:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+			}
+			m.Timestamp = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Timestamp |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				v2 := float64(math.Float64frombits(v))
+				m.CustomValues = append(m.CustomValues, v2)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortexv2
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.CustomValues) == 0 {
+					m.CustomValues = make([]float64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					v2 := float64(math.Float64frombits(v))
+					m.CustomValues = append(m.CustomValues, v2)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortexv2(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BucketSpan) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortexv2
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+			m.Offset = v
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
+			}
+			m.Length = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Length |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortexv2(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortexv2
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipCortexv2(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCortexv2
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCortexv2
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthCortexv2
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthCortexv2
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowCortexv2
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipCortexv2(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthCortexv2
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthCortexv2 = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowCortexv2   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/cortexpbv2/cortexv2.proto b/pkg/cortexpbv2/cortexv2.proto
new file mode 100644
index 0000000000..c56038b9f0
--- /dev/null
+++ b/pkg/cortexpbv2/cortexv2.proto
@@ -0,0 +1,227 @@
+syntax = "proto3";
+
+package cortexpbv2;
+
+option go_package = "cortexpbv2";
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
+message WriteRequest {
+  reserved 1 to 2;
+  enum SourceEnum {
+    API = 0;
+    RULE = 1;
+  }
+  SourceEnum Source = 3;
+  repeated string symbols = 4;
+  repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"];
+
+  bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus
+}
+
+message WriteResponse {
+  // Samples represents X-Prometheus-Remote-Write-Written-Samples
+  int64 Samples = 1;
+  // Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+  int64 Histograms = 2;
+  // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+  int64 Exemplars = 3;
+}
+
+message TimeSeries {
+  repeated uint32 labels_refs = 1;
+  // Timeseries messages can either specify samples or (native) histogram samples
+  // (histogram field), but not both. For a typical sender (real-time metric
+  // streaming), in healthy cases, there will be only one sample or histogram.
+  //
+  // Samples and histograms are sorted by timestamp (older first).
+  repeated Sample samples = 2 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
+
+  // exemplars represents an optional set of exemplars attached to this series' samples.
+  repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
+
+  // metadata represents the metadata associated with the given series' samples.
+  Metadata metadata = 5 [(gogoproto.nullable) = false];
+
+  // created_timestamp represents an optional created timestamp associated with
+  // this series' samples in ms format, typically for counter or histogram type
+  // metrics. Created timestamp represents the time when the counter started
+  // counting (sometimes referred to as start timestamp), which can increase
+  // the accuracy of query results.
+  //
+  // Note that some receivers might require this and in return fail to
+  // ingest such samples within the Request.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  //
+  // Note that the "optional" keyword is omitted due to
+  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+  // Zero value means value not set. If you need to use exactly zero value for
+  // the timestamp, use 1 millisecond before or after.
+  int64 created_timestamp = 6;
+}
+
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+message Exemplar {
+  // labels_refs is an optional list of label name-value pair references, encoded
+  // as indices to the Request.symbols array. This list's len is always
+  // a multiple of 2, and the underlying labels should be sorted lexicographically.
+  // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+  repeated uint32 labels_refs = 1;
+  // value represents an exact example value. This can be useful when the exemplar
+  // is attached to a histogram, which only gives an estimated value through buckets.
+  double value = 2;
+  // timestamp represents the timestamp of the exemplar in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  int64 timestamp = 3;
+}
+
+// Sample represents series sample.
+message Sample {
+  // value of the sample.
+  double value = 1;
+  // timestamp represents timestamp of the sample in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  int64 timestamp = 2;
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+message Metadata {
+  enum MetricType {
+    METRIC_TYPE_UNSPECIFIED    = 0;
+    METRIC_TYPE_COUNTER        = 1;
+    METRIC_TYPE_GAUGE          = 2;
+    METRIC_TYPE_HISTOGRAM      = 3;
+    METRIC_TYPE_GAUGEHISTOGRAM = 4;
+    METRIC_TYPE_SUMMARY        = 5;
+    METRIC_TYPE_INFO           = 6;
+    METRIC_TYPE_STATESET       = 7;
+  }
+  MetricType type = 1;
+  // help_ref is a reference to the Request.symbols array representing help
+  // text for the metric. Help is optional, reference should point to an empty string in
+  // such a case.
+  uint32 help_ref = 3;
+  // unit_ref is a reference to the Request.symbols array representing a unit
+  // for the metric. Unit is optional, reference should point to an empty string in
+  // such a case.
+  uint32 unit_ref = 4;
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+  enum ResetHint {
+    RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
+    RESET_HINT_YES     = 1; // This is the 1st histogram after a counter reset.
+    RESET_HINT_NO      = 2; // There was no counter reset between this and the previous Histogram.
+    RESET_HINT_GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int   = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
+  // added in future for new bucketing layouts.
+  //
+  // The schema equal to -53 means custom buckets. See
+  // custom_values field description for more details.
+  //
+  // Values between -4 and 8 represent base-2 bucket schema, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
+  // each bucket boundary is the previous boundary times 2^(2^-n).
+  sint32 schema             = 4;
+  double zero_threshold     = 5; // Breadth of the zero bucket.
+  oneof zero_count { // Count in zero bucket.
+    uint64 zero_count_int     = 6;
+    double zero_count_float   = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans =  8 [(gogoproto.nullable) = false];
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for
+  // float histograms.
+  repeated sint64 negative_deltas    =  9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts    = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  //
+  // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+  // * The span offset+length points to the index of the custom_values array
+  // or +Inf if pointing to the len of the array.
+  // * The counts and deltas have the same meaning as for exponential histograms.
+  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for
+  // float histograms.
+  repeated sint64 positive_deltas    = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts    = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint               = 14;
+  // timestamp represents timestamp of the sample in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  int64 timestamp = 15;
+
+  // custom_values is an additional field used by non-exponential bucketing layouts.
+  //
+  // For custom buckets (-53 schema value) custom_values specify monotonically
+  // increasing upper inclusive boundaries for the bucket counts with arbitrary
+  // widths for this histogram. In other words, custom_values represents custom,
+  // explicit bucketing that could have been converted from the classic histograms.
+  //
+  // Those bounds are then referenced by spans in positive_spans with corresponding positive
+  // counts of deltas (refer to positive_spans for more details). This way we can
+  // encode sparse histograms with custom bucketing (many buckets are often
+  // not used).
+  //
+  // Note that for custom bounds, even negative observations are placed in the positive
+  // counts to simplify the implementation and avoid ambiguity of where to place
+  // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
+  // the zero bucket are unused, if the schema indicates custom bucketing.
+  //
+  // For each upper boundary the previous boundary represents the lower exclusive
+  // boundary for that bucket. The first element is the upper inclusive boundary
+  // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+  // This is similar to "le" label semantics on classic histograms. You may add a
+  // bucket with an upper bound of 0 to make sure that you really have no negative
+  // observations, but in practice, native histogram rendering will show both with
+  // or without first upper boundary 0 and no negative counts as the same case.
+  //
+  // The last element is not only the upper inclusive bound of the last regular
+  // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
+  repeated double custom_values = 16;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}

From 911497b0a5ddd50615949aba04dc4e3ce2120958 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Thu, 7 Nov 2024 17:39:34 +0900
Subject: [PATCH 2/6] Add prealloc timeseries v2

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 pkg/cortexpb/slicesPool.go          |   6 +-
 pkg/cortexpb/slicesPool_test.go     |  12 +--
 pkg/cortexpb/timeseries.go          |   6 +-
 pkg/cortexpbv2/timeseriesv2.go      | 136 ++++++++++++++++++++++++++++
 pkg/cortexpbv2/timeseriesv2_test.go | 111 +++++++++++++++++++++++
 5 files changed, 259 insertions(+), 12 deletions(-)
 create mode 100644 pkg/cortexpbv2/timeseriesv2.go
 create mode 100644 pkg/cortexpbv2/timeseriesv2_test.go

diff --git a/pkg/cortexpb/slicesPool.go b/pkg/cortexpb/slicesPool.go
index e28d51d4f2..eaace237ce 100644
--- a/pkg/cortexpb/slicesPool.go
+++ b/pkg/cortexpb/slicesPool.go
@@ -13,7 +13,7 @@ type byteSlicePools struct {
 	pools []sync.Pool
 }
 
-func newSlicePool(pools int) *byteSlicePools {
+func NewSlicePool(pools int) *byteSlicePools {
 	sp := byteSlicePools{}
 	sp.init(pools)
 	return &sp
@@ -32,7 +32,7 @@ func (sp *byteSlicePools) init(pools int) {
 	}
 }
 
-func (sp *byteSlicePools) getSlice(size int) *[]byte {
+func (sp *byteSlicePools) GetSlice(size int) *[]byte {
 	index := int(math.Ceil(math.Log2(float64(size)))) - minPoolSizePower
 
 	if index >= len(sp.pools) {
@@ -50,7 +50,7 @@ func (sp *byteSlicePools) getSlice(size int) *[]byte {
 	return s
 }
 
-func (sp *byteSlicePools) reuseSlice(s *[]byte) {
+func (sp *byteSlicePools) ReuseSlice(s *[]byte) {
 	index := int(math.Floor(math.Log2(float64(cap(*s))))) - minPoolSizePower
 
 	if index >= len(sp.pools) || index < 0 {
diff --git a/pkg/cortexpb/slicesPool_test.go b/pkg/cortexpb/slicesPool_test.go
index 9bc56cdec3..dd35beb33a 100644
--- a/pkg/cortexpb/slicesPool_test.go
+++ b/pkg/cortexpb/slicesPool_test.go
@@ -9,22 +9,22 @@ import (
 )
 
 func TestFuzzyByteSlicePools(t *testing.T) {
-	sut := newSlicePool(20)
+	sut := NewSlicePool(20)
 	maxByteSize := int(math.Pow(2, 20+minPoolSizePower-1))
 
 	for i := 0; i < 1000; i++ {
 		size := rand.Int() % maxByteSize
-		s := sut.getSlice(size)
+		s := sut.GetSlice(size)
 		assert.Equal(t, len(*s), size)
-		sut.reuseSlice(s)
+		sut.ReuseSlice(s)
 	}
 }
 
 func TestReturnSliceSmallerThanMin(t *testing.T) {
-	sut := newSlicePool(20)
+	sut := NewSlicePool(20)
 	size := 3
 	buff := make([]byte, 0, size)
-	sut.reuseSlice(&buff)
-	buff2 := sut.getSlice(size * 2)
+	sut.ReuseSlice(&buff)
+	buff2 := sut.GetSlice(size * 2)
 	assert.Equal(t, len(*buff2), size*2)
 }
diff --git a/pkg/cortexpb/timeseries.go b/pkg/cortexpb/timeseries.go
index db7354ffe4..b880739ae2 100644
--- a/pkg/cortexpb/timeseries.go
+++ b/pkg/cortexpb/timeseries.go
@@ -47,7 +47,7 @@ var (
 			}
 		},
 	}
-	bytePool = newSlicePool(20)
+	bytePool = NewSlicePool(20)
 )
 
 // PreallocConfig configures how structures will be preallocated to optimise
@@ -86,7 +86,7 @@ func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
 
 func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
 	size := p.Size()
-	p.data = bytePool.getSlice(size)
+	p.data = bytePool.GetSlice(size)
 	dAtA = *p.data
 	n, err := p.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
@@ -97,7 +97,7 @@ func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
 
 func ReuseWriteRequest(req *PreallocWriteRequest) {
 	if req.data != nil {
-		bytePool.reuseSlice(req.data)
+		bytePool.ReuseSlice(req.data)
 		req.data = nil
 	}
 	req.Source = 0
diff --git a/pkg/cortexpbv2/timeseriesv2.go b/pkg/cortexpbv2/timeseriesv2.go
new file mode 100644
index 0000000000..684e4bfac0
--- /dev/null
+++ b/pkg/cortexpbv2/timeseriesv2.go
@@ -0,0 +1,136 @@
+package cortexpbv2
+
+import (
+	"sync"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+var (
+	expectedTimeseries          = 100
+	expectedLabels              = 20
+	expectedSymbols             = 20
+	expectedSamplesPerSeries    = 10
+	expectedExemplarsPerSeries  = 1
+	expectedHistogramsPerSeries = 1
+
+	slicePool = sync.Pool{
+		New: func() interface{} {
+			return make([]PreallocTimeseriesV2, 0, expectedTimeseries)
+		},
+	}
+
+	timeSeriesPool = sync.Pool{
+		New: func() interface{} {
+			return &TimeSeries{
+				LabelsRefs: make([]uint32, 0, expectedLabels),
+				Samples:    make([]Sample, 0, expectedSamplesPerSeries),
+				Histograms: make([]Histogram, 0, expectedHistogramsPerSeries),
+				Exemplars:  make([]Exemplar, 0, expectedExemplarsPerSeries),
+				Metadata:   Metadata{},
+			}
+		},
+	}
+
+	writeRequestPool = sync.Pool{
+		New: func() interface{} {
+			return &PreallocWriteRequestV2{
+				WriteRequest: WriteRequest{
+					Symbols: make([]string, 0, expectedSymbols),
+				},
+			}
+		},
+	}
+	bytePool = cortexpb.NewSlicePool(20)
+)
+
+// PreallocWriteRequestV2 is a WriteRequest which preallocs slices on Unmarshal.
+type PreallocWriteRequestV2 struct {
+	WriteRequest
+	data *[]byte
+}
+
+// Unmarshal implements proto.Message.
+func (p *PreallocWriteRequestV2) Unmarshal(dAtA []byte) error {
+	p.Timeseries = PreallocTimeseriesV2SliceFromPool()
+	return p.WriteRequest.Unmarshal(dAtA)
+}
+
+func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) {
+	size := p.Size()
+	p.data = bytePool.GetSlice(size)
+	dAtA = *p.data
+	n, err := p.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+// PreallocTimeseriesV2 is a TimeSeries which preallocs slices on Unmarshal.
+type PreallocTimeseriesV2 struct {
+	*TimeSeries
+}
+
+// Unmarshal implements proto.Message.
+func (p *PreallocTimeseriesV2) Unmarshal(dAtA []byte) error {
+	p.TimeSeries = TimeseriesV2FromPool()
+	return p.TimeSeries.Unmarshal(dAtA)
+}
+
+func ReuseWriteRequestV2(req *PreallocWriteRequestV2) {
+	if req.data != nil {
+		bytePool.ReuseSlice(req.data)
+		req.data = nil
+	}
+	req.Source = 0
+	req.Symbols = nil
+	req.Timeseries = nil
+	writeRequestPool.Put(req)
+}
+
+func PreallocWriteRequestV2FromPool() *PreallocWriteRequestV2 {
+	return writeRequestPool.Get().(*PreallocWriteRequestV2)
+}
+
+// PreallocTimeseriesV2SliceFromPool retrieves a slice of PreallocTimeseriesV2 from a sync.Pool.
+// ReuseSlice should be called once done.
+func PreallocTimeseriesV2SliceFromPool() []PreallocTimeseriesV2 {
+	return slicePool.Get().([]PreallocTimeseriesV2)
+}
+
+// ReuseSlice puts the slice back into a sync.Pool for reuse.
+func ReuseSlice(ts []PreallocTimeseriesV2) {
+	for i := range ts {
+		ReuseTimeseries(ts[i].TimeSeries)
+	}
+
+	slicePool.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details
+}
+
+// TimeseriesV2FromPool retrieves a pointer to a TimeSeries from a sync.Pool.
+// ReuseTimeseries should be called once done, unless ReuseSlice was called on the slice that contains this TimeSeries.
+func TimeseriesV2FromPool() *TimeSeries {
+	return timeSeriesPool.Get().(*TimeSeries)
+}
+
+// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse.
+func ReuseTimeseries(ts *TimeSeries) {
+	// clear ts labelRefs and samples
+	ts.LabelsRefs = ts.LabelsRefs[:0]
+	ts.Samples = ts.Samples[:0]
+
+	// clear exemplar label refs
+	for i := range ts.Exemplars {
+		ts.Exemplars[i].LabelsRefs = ts.Exemplars[i].LabelsRefs[:0]
+	}
+
+	for i := range ts.Histograms {
+		ts.Histograms[i].Reset()
+	}
+
+	ts.Exemplars = ts.Exemplars[:0]
+	ts.Histograms = ts.Histograms[:0]
+	ts.Metadata = Metadata{}
+	timeSeriesPool.Put(ts)
+}
diff --git a/pkg/cortexpbv2/timeseriesv2_test.go b/pkg/cortexpbv2/timeseriesv2_test.go
new file mode 100644
index 0000000000..d10527564e
--- /dev/null
+++ b/pkg/cortexpbv2/timeseriesv2_test.go
@@ -0,0 +1,111 @@
+package cortexpbv2
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPreallocTimeseriesV2SliceFromPool(t *testing.T) {
+	t.Run("new instance is provided when not available to reuse", func(t *testing.T) {
+		first := PreallocTimeseriesV2SliceFromPool()
+		second := PreallocTimeseriesV2SliceFromPool()
+
+		assert.NotSame(t, first, second)
+	})
+
+	t.Run("instance is cleaned before reusing", func(t *testing.T) {
+		slice := PreallocTimeseriesV2SliceFromPool()
+		slice = append(slice, PreallocTimeseriesV2{TimeSeries: &TimeSeries{}})
+		ReuseSlice(slice)
+
+		reused := PreallocTimeseriesV2SliceFromPool()
+		assert.Len(t, reused, 0)
+	})
+}
+
+func TestTimeseriesV2FromPool(t *testing.T) {
+	t.Run("new instance is provided when not available to reuse", func(t *testing.T) {
+		first := TimeseriesV2FromPool()
+		second := TimeseriesV2FromPool()
+
+		assert.NotSame(t, first, second)
+	})
+
+	t.Run("instance is cleaned before reusing", func(t *testing.T) {
+		ts := TimeseriesV2FromPool()
+		ts.LabelsRefs = []uint32{1, 2}
+		ts.Samples = []Sample{{Value: 1, Timestamp: 2}}
+		ts.Exemplars = []Exemplar{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}}
+		ts.Histograms = []Histogram{{}}
+		fmt.Println("ts.Histograms", len(ts.Histograms))
+		ReuseTimeseries(ts)
+
+		reused := TimeseriesV2FromPool()
+		assert.Len(t, reused.LabelsRefs, 0)
+		assert.Len(t, reused.Samples, 0)
+		assert.Len(t, reused.Exemplars, 0)
+		assert.Len(t, reused.Histograms, 0)
+	})
+}
+
+func BenchmarkMarshallWriteRequest(b *testing.B) {
+	ts := PreallocTimeseriesV2SliceFromPool()
+
+	for i := 0; i < 100; i++ {
+		ts = append(ts, PreallocTimeseriesV2{TimeSeries: TimeseriesV2FromPool()})
+		ts[i].LabelsRefs = []uint32{1, 2, 3, 4, 5, 6, 7, 8}
+		ts[i].Samples = []Sample{{Value: 1, Timestamp: 2}}
+	}
+
+	tests := []struct {
+		name                string
+		writeRequestFactory func() proto.Marshaler
+		clean               func(in interface{})
+	}{
+		{
+			name: "no-pool",
+			writeRequestFactory: func() proto.Marshaler {
+				return &WriteRequest{Timeseries: ts}
+			},
+			clean: func(in interface{}) {},
+		},
+		{
+			name: "byte pool",
+			writeRequestFactory: func() proto.Marshaler {
+				w := &PreallocWriteRequestV2{}
+				w.Timeseries = ts
+				return w
+			},
+			clean: func(in interface{}) {
+				ReuseWriteRequestV2(in.(*PreallocWriteRequestV2))
+			},
+		},
+		{
+			name: "byte and write pool",
+			writeRequestFactory: func() proto.Marshaler {
+				w := PreallocWriteRequestV2FromPool()
+				w.Timeseries = ts
+				return w
+			},
+			clean: func(in interface{}) {
+				ReuseWriteRequestV2(in.(*PreallocWriteRequestV2))
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		b.Run(tc.name, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				w := tc.writeRequestFactory()
+				_, err := w.Marshal()
+				require.NoError(b, err)
+				tc.clean(w)
+			}
+			b.ReportAllocs()
+		})
+	}
+}

From d54383ea0857ffe80cb3f99f6f807c4347528572 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Thu, 7 Nov 2024 17:42:30 +0900
Subject: [PATCH 3/6] Add remote write v2 api

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 pkg/api/api.go                |  25 +++-
 pkg/ruler/compat.go           |   2 +
 pkg/ruler/compat_test.go      |  14 +-
 pkg/ruler/pusher_mock_test.go |   6 +
 pkg/util/push/push.go         | 148 ++++++++++++++++---
 pkg/util/push/push_test.go    | 264 +++++++++++++++++++++++++++++++---
 6 files changed, 416 insertions(+), 43 deletions(-)

diff --git a/pkg/api/api.go b/pkg/api/api.go
index 9de60b9bc4..5afd9504ef 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -23,6 +23,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb"
 	"github.com/cortexproject/cortex/pkg/compactor"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/distributor"
 	"github.com/cortexproject/cortex/pkg/distributor/distributorpb"
 	frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1"
@@ -45,6 +46,9 @@ import (
 
 // DistributorPushWrapper wraps around a push. It is similar to middleware.Interface.
 type DistributorPushWrapper func(next push.Func) push.Func
+
+// DistributorPushWrapperV2 wraps around a v2 push. It is similar to middleware.Interface.
+type DistributorPushWrapperV2 func(next push.FuncV2) push.FuncV2
 type ConfigHandler func(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc
 
 type Config struct {
@@ -60,7 +64,8 @@ type Config struct {
 
 	// This allows downstream projects to wrap the distributor push function
 	// and access the deserialized write requests before/after they are pushed.
-	DistributorPushWrapper DistributorPushWrapper `yaml:"-"`
+	DistributorPushWrapper   DistributorPushWrapper   `yaml:"-"`
+	DistributorPushWrapperV2 DistributorPushWrapperV2 `yaml:"-"`
 
 	// The CustomConfigHandler allows for providing a different handler for the
 	// `/config` endpoint. If this field is set _before_ the API module is
@@ -107,6 +112,15 @@ func (cfg *Config) Validate() error {
 	return nil
 }
 
+// wrapDistributorPushV2 either wraps the distributor push v2 function as configured or returns it directly.
+func (cfg *Config) wrapDistributorPushV2(d *distributor.Distributor) push.FuncV2 {
+	if cfg.DistributorPushWrapperV2 != nil {
+		return cfg.DistributorPushWrapperV2(d.PushV2)
+	}
+
+	return d.PushV2
+}
+
 // Push either wraps the distributor push function as configured or returns the distributor push directly.
 func (cfg *Config) wrapDistributorPush(d *distributor.Distributor) push.Func {
 	if cfg.DistributorPushWrapper != nil {
@@ -277,7 +291,7 @@ func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) {
 func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config, overrides *validation.Overrides) {
 	distributorpb.RegisterDistributorServer(a.server.GRPC, d)
 
-	a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST")
+	a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d), a.cfg.wrapDistributorPushV2(d)), true, "POST")
 	a.RegisterRoute("/api/v1/otlp/v1/metrics", push.OTLPHandler(overrides, pushConfig.OTLPConfig, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST")
 
 	a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ring", "Distributor Ring Status")
@@ -289,7 +303,7 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib
 	a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET")
 
 	// Legacy Routes
-	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST")
+	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d), a.cfg.wrapDistributorPushV2(d)), true, "POST")
 	a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET")
 	a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET")
 }
@@ -304,6 +318,7 @@ type Ingester interface {
 	AllUserStatsHandler(http.ResponseWriter, *http.Request)
 	ModeHandler(http.ResponseWriter, *http.Request)
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
+	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
 }
 
 // RegisterIngester registers the ingesters HTTP and GRPC service
@@ -322,12 +337,12 @@ func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) {
 	a.RegisterRoute("/ingester/renewTokens", http.HandlerFunc(i.RenewTokenHandler), false, "GET", "POST")
 	a.RegisterRoute("/ingester/all_user_stats", http.HandlerFunc(i.AllUserStatsHandler), false, "GET")
 	a.RegisterRoute("/ingester/mode", http.HandlerFunc(i.ModeHandler), false, "GET", "POST")
-	a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging.
+	a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push, i.PushV2), true, "POST") // For testing and debugging.
 
 	// Legacy Routes
 	a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST")
 	a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST")
-	a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging.
+	a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push, i.PushV2), true, "POST") // For testing and debugging.
 }
 
 func (a *API) RegisterTenantDeletion(api *purger.TenantDeletionAPI) {
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index b370e34d20..4a2916c2af 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -22,6 +22,7 @@ import (
 	"github.com/weaveworks/common/user"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/querier"
 	"github.com/cortexproject/cortex/pkg/querier/stats"
 	"github.com/cortexproject/cortex/pkg/ring/client"
@@ -33,6 +34,7 @@ import (
 // Pusher is an ingester server that accepts pushes.
 type Pusher interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
+	PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
 }
 
 type PusherAppender struct {
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index 3a3d663350..7d489b65b0 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -23,14 +23,22 @@ import (
 	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/querier/stats"
 	"github.com/cortexproject/cortex/pkg/util/validation"
 )
 
 type fakePusher struct {
-	request  *cortexpb.WriteRequest
-	response *cortexpb.WriteResponse
-	err      error
+	request    *cortexpb.WriteRequest
+	requestV2  *cortexpbv2.WriteRequest
+	response   *cortexpb.WriteResponse
+	responseV2 *cortexpbv2.WriteResponse
+	err        error
+}
+
+func (p *fakePusher) PushV2(ctx context.Context, r *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	p.requestV2 = r
+	return p.responseV2, p.err
 }
 
 func (p *fakePusher) Push(ctx context.Context, r *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
diff --git a/pkg/ruler/pusher_mock_test.go b/pkg/ruler/pusher_mock_test.go
index ecfe6f3164..02fc5b13de 100644
--- a/pkg/ruler/pusher_mock_test.go
+++ b/pkg/ruler/pusher_mock_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 type pusherMock struct {
@@ -16,6 +17,11 @@ func newPusherMock() *pusherMock {
 	return &pusherMock{}
 }
 
+func (m *pusherMock) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	args := m.Called(ctx, req)
+	return args.Get(0).(*cortexpbv2.WriteResponse), args.Error(1)
+}
+
 func (m *pusherMock) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	args := m.Called(ctx, req)
 	return args.Get(0).(*cortexpb.WriteResponse), args.Error(1)
diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go
index 9cabb39522..c115f559ed 100644
--- a/pkg/util/push/push.go
+++ b/pkg/util/push/push.go
@@ -2,22 +2,44 @@ package push
 
 import (
 	"context"
+	"fmt"
 	"net/http"
+	"strconv"
+	"strings"
 
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/weaveworks/common/httpgrpc"
 	"github.com/weaveworks/common/middleware"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/log"
 )
 
+const (
+	remoteWriteVersionHeader        = "X-Prometheus-Remote-Write-Version"
+	remoteWriteVersion1HeaderValue  = "0.1.0"
+	remoteWriteVersion20HeaderValue = "2.0.0"
+	appProtoContentType             = "application/x-protobuf"
+	appProtoV1ContentType           = "application/x-protobuf;proto=prometheus.WriteRequest"
+	appProtoV2ContentType           = "application/x-protobuf;proto=io.prometheus.write.v2.Request"
+
+	rw20WrittenSamplesHeader    = "X-Prometheus-Remote-Write-Samples-Written"
+	rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written"
+	rw20WrittenExemplarsHeader  = "X-Prometheus-Remote-Write-Exemplars-Written"
+)
+
 // Func defines the type of the push. It is similar to http.HandlerFunc.
 type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
 
+// FuncV2 defines the type of the push for remote write v2. It is similar to http.HandlerFunc.
+type FuncV2 func(ctx context.Context, request *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+
 // Handler is a http.Handler which accepts WriteRequests.
-func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler {
+func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func, pushV2 FuncV2) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		ctx := r.Context()
 		logger := log.WithContext(ctx, log.Logger)
@@ -28,31 +50,123 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F
 				logger = log.WithSourceIPs(source, logger)
 			}
 		}
-		var req cortexpb.PreallocWriteRequest
-		err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy)
+
+		contentType := r.Header.Get("Content-Type")
+		if contentType == "" {
+			contentType = appProtoContentType
+		}
+
+		msgType, err := parseProtoMsg(contentType)
 		if err != nil {
-			level.Error(logger).Log("err", err.Error())
-			http.Error(w, err.Error(), http.StatusBadRequest)
+			level.Error(logger).Log("Error decoding remote write request", "err", err)
+			http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
 			return
 		}
 
-		req.SkipLabelNameValidation = false
-		if req.Source == 0 {
-			req.Source = cortexpb.API
+		if msgType != config.RemoteWriteProtoMsgV1 && msgType != config.RemoteWriteProtoMsgV2 {
+			level.Error(logger).Log("Error decoding remote write request", "err", err)
+			http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+			return
+		}
+
+		enc := r.Header.Get("Content-Encoding")
+		if enc == "" {
+		} else if enc != string(remote.SnappyBlockCompression) {
+			err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, remote.SnappyBlockCompression)
+			level.Error(logger).Log("Error decoding remote write request", "err", err)
+			http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+			return
 		}
 
-		if _, err := push(ctx, &req.WriteRequest); err != nil {
-			resp, ok := httpgrpc.HTTPResponseFromError(err)
-			if !ok {
-				http.Error(w, err.Error(), http.StatusInternalServerError)
+		switch msgType {
+		case config.RemoteWriteProtoMsgV1:
+			var req cortexpb.PreallocWriteRequest
+			err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy)
+			if err != nil {
+				level.Error(logger).Log("err", err.Error())
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+
+			req.SkipLabelNameValidation = false
+			if req.Source == 0 {
+				req.Source = cortexpb.API
+			}
+
+			if _, err := push(ctx, &req.WriteRequest); err != nil {
+				resp, ok := httpgrpc.HTTPResponseFromError(err)
+				if !ok {
+					http.Error(w, err.Error(), http.StatusInternalServerError)
+					return
+				}
+				if resp.GetCode()/100 == 5 {
+					level.Error(logger).Log("msg", "push error", "err", err)
+				} else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests {
+					level.Warn(logger).Log("msg", "push refused", "err", err)
+				}
+				http.Error(w, string(resp.Body), int(resp.Code))
+			}
+		case config.RemoteWriteProtoMsgV2:
+			var req cortexpbv2.WriteRequest
+			err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy)
+			if err != nil {
+				fmt.Println("err", err)
+				level.Error(logger).Log("err", err.Error())
+				http.Error(w, err.Error(), http.StatusBadRequest)
 				return
 			}
-			if resp.GetCode()/100 == 5 {
-				level.Error(logger).Log("msg", "push error", "err", err)
-			} else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests {
-				level.Warn(logger).Log("msg", "push refused", "err", err)
+
+			req.SkipLabelNameValidation = false
+			if req.Source == 0 {
+				req.Source = cortexpbv2.API
+			}
+
+			if resp, err := pushV2(ctx, &req); err != nil {
+				resp, ok := httpgrpc.HTTPResponseFromError(err)
+				w.Header().Set(rw20WrittenSamplesHeader, "0")
+				w.Header().Set(rw20WrittenHistogramsHeader, "0")
+				w.Header().Set(rw20WrittenExemplarsHeader, "0")
+				if !ok {
+					http.Error(w, err.Error(), http.StatusInternalServerError)
+					return
+				}
+				if resp.GetCode()/100 == 5 {
+					level.Error(logger).Log("msg", "push error", "err", err)
+				} else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests {
+					level.Warn(logger).Log("msg", "push refused", "err", err)
+				}
+				http.Error(w, string(resp.Body), int(resp.Code))
+			} else {
+				w.Header().Set(rw20WrittenSamplesHeader, strconv.FormatInt(resp.Samples, 10))
+				w.Header().Set(rw20WrittenHistogramsHeader, strconv.FormatInt(resp.Histograms, 10))
+				w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(resp.Exemplars, 10))
 			}
-			http.Error(w, string(resp.Body), int(resp.Code))
 		}
 	})
 }
+
+// Refer to parseProtoMsg in https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go
+func parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
+	contentType = strings.TrimSpace(contentType)
+
+	parts := strings.Split(contentType, ";")
+	if parts[0] != appProtoContentType {
+		return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
+	}
+	// Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
+	for _, p := range parts[1:] {
+		pair := strings.Split(p, "=")
+		if len(pair) != 2 {
+			return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
+		}
+		if pair[0] == "proto" {
+			ret := config.RemoteWriteProtoMsg(pair[1])
+			if err := ret.Validate(); err != nil {
+				return "", fmt.Errorf("got %v content type; %w", contentType, err)
+			}
+			return ret, nil
+		}
+	}
+	// No "proto=" parameter, assuming v1.
+	return config.RemoteWriteProtoMsgV1, nil
+}
diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go
index b806011a61..178f2346c9 100644
--- a/pkg/util/push/push_test.go
+++ b/pkg/util/push/push_test.go
@@ -10,42 +10,208 @@ import (
 
 	"github.com/golang/snappy"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/weaveworks/common/middleware"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 func TestHandler_remoteWrite(t *testing.T) {
-	req := createRequest(t, createPrometheusRemoteWriteProtobuf(t))
-	resp := httptest.NewRecorder()
-	handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API))
-	handler.ServeHTTP(resp, req)
-	assert.Equal(t, 200, resp.Code)
+	handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API))
+
+	t.Run("remote write v1", func(t *testing.T) {
+		req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, 200, resp.Code)
+	})
+	t.Run("remote write v2", func(t *testing.T) {
+		req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, 200, resp.Code)
+
+		// test header value
+		respHeader := resp.Header()
+		assert.Equal(t, "1", respHeader[rw20WrittenSamplesHeader][0])
+		assert.Equal(t, "1", respHeader[rw20WrittenHistogramsHeader][0])
+		assert.Equal(t, "1", respHeader[rw20WrittenExemplarsHeader][0])
+
+	})
+}
+
+func TestHandler_ContentTypeAndEncoding(t *testing.T) {
+	sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)")
+	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API))
+
+	tests := []struct {
+		description  string
+		reqHeaders   map[string]string
+		expectedCode int
+		isV2         bool
+	}{
+		{
+			description: "[RW 2.0] correct content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV2ContentType,
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "[RW 1.0] correct content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV1ContentType,
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "0.1.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "[RW 2.0] wrong content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           "yolo",
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description: "[RW 2.0] wrong content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           "application/x-protobuf;proto=yolo",
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description: "[RW 2.0] wrong content-encoding",
+			reqHeaders: map[string]string{
+				"Content-Type":           "application/x-protobuf;proto=io.prometheus.write.v2.Request",
+				"Content-Encoding":       "zstd",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description:  "no header, should treated as RW 1.0",
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "missing content-type, should treated as RW 1.0",
+			reqHeaders: map[string]string{
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "missing content-encoding",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV2ContentType,
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "missing remote write version, should treated based on Content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":     appProtoV2ContentType,
+				"Content-Encoding": "snappy",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "missing remote write version, should treated based on Content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":     appProtoV1ContentType,
+				"Content-Encoding": "snappy",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			if test.isV2 {
+				req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.API))
+				resp := httptest.NewRecorder()
+				handler.ServeHTTP(resp, req)
+				assert.Equal(t, test.expectedCode, resp.Code)
+			} else {
+				req := createRequestWithHeaders(t, test.reqHeaders, createCortexWriteRequestProtobuf(t, false, cortexpb.API))
+				resp := httptest.NewRecorder()
+				handler.ServeHTTP(resp, req)
+				assert.Equal(t, test.expectedCode, resp.Code)
+			}
+		})
+	}
 }
 
 func TestHandler_cortexWriteRequest(t *testing.T) {
-	req := createRequest(t, createCortexWriteRequestProtobuf(t, false))
-	resp := httptest.NewRecorder()
 	sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)")
-	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE))
-	handler.ServeHTTP(resp, req)
-	assert.Equal(t, 200, resp.Code)
+	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE))
+
+	t.Run("remote write v1", func(t *testing.T) {
+		req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, 200, resp.Code)
+	})
+	t.Run("remote write v2", func(t *testing.T) {
+		req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, 200, resp.Code)
+	})
 }
 
 func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) {
 	for _, req := range []*http.Request{
-		createRequest(t, createCortexWriteRequestProtobuf(t, true)),
-		createRequest(t, createCortexWriteRequestProtobuf(t, false)),
+		createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false),
+		createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false),
+		createRequest(t, createCortexRemoteWriteV2Protobuf(t, true, cortexpbv2.RULE), true),
+		createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true),
 	} {
 		resp := httptest.NewRecorder()
-		handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE))
+		handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE))
 		handler.ServeHTTP(resp, req)
 		assert.Equal(t, 200, resp.Code)
 	}
 }
 
+func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpbv2.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) {
+	t.Helper()
+	return func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) {
+		assert.Len(t, request.Timeseries, 1)
+		assert.Equal(t, "__name__", request.Symbols[1])
+		assert.Equal(t, "foo", request.Symbols[2])
+		assert.Equal(t, expectSource, request.Source)
+		assert.False(t, request.SkipLabelNameValidation)
+		resp := &cortexpbv2.WriteResponse{
+			Samples:    1,
+			Histograms: 1,
+			Exemplars:  1,
+		}
+		return resp, nil
+	}
+}
+
 func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
 	t.Helper()
 	return func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
@@ -58,17 +224,79 @@ func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_
 	}
 }
 
-func createRequest(t *testing.T, protobuf []byte) *http.Request {
+func createRequestWithHeaders(t *testing.T, headers map[string]string, protobuf []byte) *http.Request {
+	t.Helper()
+	inoutBytes := snappy.Encode(nil, protobuf)
+	req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes))
+	require.NoError(t, err)
+
+	for k, v := range headers {
+		req.Header.Set(k, v)
+	}
+	return req
+}
+
+func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request {
 	t.Helper()
 	inoutBytes := snappy.Encode(nil, protobuf)
 	req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes))
 	require.NoError(t, err)
+
 	req.Header.Add("Content-Encoding", "snappy")
-	req.Header.Set("Content-Type", "application/x-protobuf")
-	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+
+	if isV2 {
+		req.Header.Set("Content-Type", appProtoV2ContentType)
+		req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue)
+		return req
+	}
+
+	req.Header.Set("Content-Type", appProtoContentType)
+	req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue)
 	return req
 }
 
+func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpbv2.WriteRequest_SourceEnum) []byte {
+	t.Helper()
+	input := cortexpbv2.WriteRequest{
+		Symbols: []string{"", "__name__", "foo"},
+		Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+			{
+				TimeSeries: &cortexpbv2.TimeSeries{
+					LabelsRefs: []uint32{1, 2},
+					Samples: []cortexpbv2.Sample{
+						{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+					},
+				},
+			},
+		},
+		Source:                  source,
+		SkipLabelNameValidation: skipLabelNameValidation,
+	}
+
+	inoutBytes, err := input.Marshal()
+	require.NoError(t, err)
+	return inoutBytes
+}
+
+func createPrometheusRemoteWriteV2Protobuf(t *testing.T) []byte {
+	t.Helper()
+	input := writev2.Request{
+		Symbols: []string{"", "__name__", "foo"},
+		Timeseries: []writev2.TimeSeries{
+			{
+				LabelsRefs: []uint32{1, 2},
+				Samples: []writev2.Sample{
+					{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+		},
+	}
+
+	inoutBytes, err := input.Marshal()
+	require.NoError(t, err)
+	return inoutBytes
+}
+
 func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte {
 	t.Helper()
 	input := prompb.WriteRequest{
@@ -87,7 +315,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte {
 	require.NoError(t, err)
 	return inoutBytes
 }
-func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool) []byte {
+func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte {
 	t.Helper()
 	ts := cortexpb.PreallocTimeseries{
 		TimeSeries: &cortexpb.TimeSeries{
@@ -101,7 +329,7 @@ func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool
 	}
 	input := cortexpb.WriteRequest{
 		Timeseries:              []cortexpb.PreallocTimeseries{ts},
-		Source:                  cortexpb.RULE,
+		Source:                  source,
 		SkipLabelNameValidation: skipLabelNameValidation,
 	}
 	inoutBytes, err := input.Marshal()

From 2b5ac96221af5eceb8fefa027c685080f4bc85d3 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Fri, 8 Nov 2024 18:18:09 +0900
Subject: [PATCH 4/6] Implement remote write v2

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 .github/workflows/test-build-deploy.yml       |    1 +
 .golangci.yml                                 |    1 +
 CHANGELOG.md                                  |    1 +
 integration/e2e/util.go                       |   76 +
 integration/e2ecortex/client.go               |   42 +-
 integration/remote_write_v2_test.go           |  211 +
 pkg/cortex/modules_test.go                    |    5 +
 pkg/cortexpbv2/codecv2.go                     |  160 +
 pkg/cortexpbv2/compatv2.go                    |   88 +
 pkg/cortexpbv2/compatv2_test.go               |   71 +
 pkg/distributor/distributor.go                |  442 +-
 pkg/distributor/distributor_prw2_test.go      | 2525 ++++++++++
 pkg/distributor/distributor_test.go           |   76 +
 .../distributorpb/distributor.pb.go           |   59 +-
 .../distributorpb/distributor.proto           |    2 +
 pkg/distributor/stats.go                      |   62 +
 pkg/distributor/stats_test.go                 |   41 +
 pkg/ingester/client/client.go                 |   31 +
 pkg/ingester/client/cortex_mock_test.go       |    6 +
 pkg/ingester/client/ingester.pb.go            |  209 +-
 pkg/ingester/client/ingester.proto            |    3 +
 pkg/ingester/ingester.go                      |  370 ++
 pkg/ingester/ingester_prw2_test.go            | 4468 +++++++++++++++++
 pkg/util/validation/validate.go               |  138 +
 24 files changed, 8990 insertions(+), 98 deletions(-)
 create mode 100644 integration/remote_write_v2_test.go
 create mode 100644 pkg/cortexpbv2/codecv2.go
 create mode 100644 pkg/cortexpbv2/compatv2.go
 create mode 100644 pkg/cortexpbv2/compatv2_test.go
 create mode 100644 pkg/distributor/distributor_prw2_test.go
 create mode 100644 pkg/distributor/stats.go
 create mode 100644 pkg/distributor/stats_test.go
 create mode 100644 pkg/ingester/ingester_prw2_test.go

diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml
index 8139fbaf1f..76a0a48f7e 100644
--- a/.github/workflows/test-build-deploy.yml
+++ b/.github/workflows/test-build-deploy.yml
@@ -144,6 +144,7 @@ jobs:
           - integration_querier
           - integration_ruler
           - integration_query_fuzz
+          - integration_remote_write_v2
     steps:
       - name: Upgrade golang
         uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
diff --git a/.golangci.yml b/.golangci.yml
index dbfe02e837..e5336badfd 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -49,3 +49,4 @@ run:
     - integration_querier
     - integration_ruler
     - integration_query_fuzz
+    - integration_remote_write_v2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4e77b62020..8b9a0d552e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@
 * [FEATURE] Ruler: Minimize chances of missed rule group evaluations that can occur due to OOM kills, bad underlying nodes, or due to an unhealthy ruler that appears in the ring as healthy. This feature is enabled via `-ruler.enable-ha-evaluation` flag. #6129
 * [FEATURE] Store Gateway: Add an in-memory chunk cache. #6245
 * [FEATURE] Chunk Cache: Support multi level cache and add metrics. #6249
+* [FEATURE] Distributor/Ingester: Support remote write 2.0. It includes the new proto definition and ingestion of samples and (native) histograms. #6292
 * [ENHANCEMENT] S3 Bucket Client: Add a list objects version configs to configure list api object version. #6280
 * [ENHANCEMENT] OpenStack Swift: Add application credential configs for Openstack swift object storage backend. #6255
 * [ENHANCEMENT] Query Frontend: Add new query stats metrics `cortex_query_samples_scanned_total` and `cortex_query_peak_samples` to track scannedSamples and peakSample per user. #6228
diff --git a/integration/e2e/util.go b/integration/e2e/util.go
index 141d043ab5..51a3cfcaa0 100644
--- a/integration/e2e/util.go
+++ b/integration/e2e/util.go
@@ -18,6 +18,7 @@ import (
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
@@ -149,6 +150,40 @@ func GenerateSeries(name string, ts time.Time, additionalLabels ...prompb.Label)
 	return
 }
 
+func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogram bool, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries) {
+	tsMillis := TimeToMilliseconds(ts)
+
+	st := writev2.NewSymbolTable()
+
+	lbs := labels.Labels{labels.Label{Name: "__name__", Value: name}}
+	for _, lbl := range additionalLabels {
+		lbs = append(lbs, labels.Label{Name: lbl.Name, Value: lbl.Value})
+	}
+
+	var (
+		h  *histogram.Histogram
+		fh *histogram.FloatHistogram
+		ph writev2.Histogram
+	)
+	if floatHistogram {
+		fh = tsdbutil.GenerateTestFloatHistogram(int(i))
+		ph = writev2.FromFloatHistogram(tsMillis, fh)
+	} else {
+		h = tsdbutil.GenerateTestHistogram(int(i))
+		ph = writev2.FromIntHistogram(tsMillis, h)
+	}
+
+	// Generate the series
+	series = append(series, writev2.TimeSeries{
+		LabelsRefs: st.SymbolizeLabels(lbs, nil),
+		Histograms: []writev2.Histogram{ph},
+	})
+
+	symbols = st.Symbols()
+
+	return
+}
+
 func GenerateHistogramSeries(name string, ts time.Time, i uint32, floatHistogram bool, additionalLabels ...prompb.Label) (series []prompb.TimeSeries) {
 	tsMillis := TimeToMilliseconds(ts)
 
@@ -188,6 +223,47 @@ func GenerateHistogramSeries(name string, ts time.Time, i uint32, floatHistogram
 	return
 }
 
+func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries, vector model.Vector) {
+	tsMillis := TimeToMilliseconds(ts)
+	value := rand.Float64()
+
+	st := writev2.NewSymbolTable()
+	lbs := labels.Labels{{Name: labels.MetricName, Value: name}}
+
+	for _, label := range additionalLabels {
+		lbs = append(lbs, labels.Label{
+			Name:  label.Name,
+			Value: label.Value,
+		})
+	}
+	series = append(series, writev2.TimeSeries{
+		// Generate the series
+		LabelsRefs: st.SymbolizeLabels(lbs, nil),
+		Samples: []writev2.Sample{
+			{Value: value, Timestamp: tsMillis},
+		},
+		Metadata: writev2.Metadata{
+			Type: writev2.Metadata_METRIC_TYPE_GAUGE,
+		},
+	})
+	symbols = st.Symbols()
+
+	// Generate the expected vector when querying it
+	metric := model.Metric{}
+	metric[labels.MetricName] = model.LabelValue(name)
+	for _, lbl := range additionalLabels {
+		metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
+	}
+
+	vector = append(vector, &model.Sample{
+		Metric:    metric,
+		Value:     model.SampleValue(value),
+		Timestamp: model.Time(tsMillis),
+	})
+
+	return
+}
+
 func GenerateSeriesWithSamples(
 	name string,
 	startTime time.Time,
diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go
index bc53f4dc58..2692bd3ddd 100644
--- a/integration/e2ecortex/client.go
+++ b/integration/e2ecortex/client.go
@@ -24,13 +24,13 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/rulefmt"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
-	yaml "gopkg.in/yaml.v3"
-
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
+	yaml "gopkg.in/yaml.v3"
 
 	"github.com/cortexproject/cortex/pkg/ruler"
 	"github.com/cortexproject/cortex/pkg/util/backoff"
@@ -114,6 +114,39 @@ func NewPromQueryClient(address string) (*Client, error) {
 	return c, nil
 }
 
+// PushV2 pushes the input timeseries to the remote endpoint using the Remote Write 2.0 protocol.
+func (c *Client) PushV2(symbols []string, timeseries []writev2.TimeSeries) (*http.Response, error) {
+	// Create write request
+	data, err := proto.Marshal(&writev2.Request{Symbols: symbols, Timeseries: timeseries})
+	if err != nil {
+		return nil, err
+	}
+
+	// Create HTTP request
+	compressed := snappy.Encode(nil, data)
+	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/prom/push", c.distributorAddress), bytes.NewReader(compressed))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Content-Encoding", "snappy")
+	req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
+	req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")
+	req.Header.Set("X-Scope-OrgID", c.orgID)
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+	defer cancel()
+
+	// Execute HTTP request
+	res, err := c.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+
+	defer res.Body.Close()
+	return res, nil
+}
+
 // Push the input timeseries to the remote endpoint
 func (c *Client) Push(timeseries []prompb.TimeSeries) (*http.Response, error) {
 	// Create write request
@@ -356,6 +389,11 @@ func (c *Client) Query(query string, ts time.Time) (model.Value, error) {
 	return value, err
 }
 
+func (c *Client) Metadata(name, limit string) (map[string][]promv1.Metadata, error) {
+	metadata, err := c.querierClient.Metadata(context.Background(), name, limit)
+	return metadata, err
+}
+
 // QueryExemplars runs an exemplars query
 func (c *Client) QueryExemplars(query string, start, end time.Time) ([]promv1.ExemplarQueryResult, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go
new file mode 100644
index 0000000000..775ccb98bb
--- /dev/null
+++ b/integration/remote_write_v2_test.go
@@ -0,0 +1,211 @@
+//go:build integration_remote_write_v2
+// +build integration_remote_write_v2
+
+package integration
+
+import (
+	"math/rand"
+	"net/http"
+	"path"
+	"testing"
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cortexproject/cortex/integration/e2e"
+	e2edb "github.com/cortexproject/cortex/integration/e2e/db"
+	"github.com/cortexproject/cortex/integration/e2ecortex"
+	"github.com/cortexproject/cortex/pkg/storage/tsdb"
+)
+
+func TestIngest(t *testing.T) {
+	const blockRangePeriod = 5 * time.Second
+
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul := e2edb.NewConsulWithName("consul")
+	require.NoError(t, s.StartAndWaitReady(consul))
+
+	flags := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine":                                     blocksStorageEngine,
+			"-blocks-storage.backend":                           "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval":     "4m",
+			"-blocks-storage.bucket-store.sync-interval":        "15m",
+			"-blocks-storage.bucket-store.index-cache.backend":  tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled":           "true",
+			"-blocks-storage.tsdb.block-ranges-period":          blockRangePeriod.String(),
+			"-blocks-storage.tsdb.ship-interval":                "1s",
+			"-blocks-storage.tsdb.retention-period":             ((blockRangePeriod * 2) - 1).String(),
+			"-blocks-storage.tsdb.enable-native-histograms":     "true",
+			// Ingester.
+			"-ring.store":      "consul",
+			"-consul.hostname": consul.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	path := path.Join(s.SharedDir(), "cortex-1")
+
+	flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path})
+	// Start Cortex replicas.
+	cortex := e2ecortex.NewSingleBinary("cortex", flags, "")
+	require.NoError(t, s.StartAndWaitReady(cortex))
+
+	// Wait until Cortex replicas have updated the ring state.
+	require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total"))
+
+	c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	now := time.Now()
+
+	// series push
+	symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"})
+	res, err := c.PushV2(symbols1, series)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+	testPushHeader(t, res.Header, "1", "0", "0")
+
+	// sample
+	result, err := c.Query("test_series", now)
+	require.NoError(t, err)
+	assert.Equal(t, expectedVector, result.(model.Vector))
+
+	// metadata
+	metadata, err := c.Metadata("test_series", "")
+	require.NoError(t, err)
+	require.Equal(t, 1, len(metadata["test_series"]))
+
+	// histogram
+	histogramIdx := rand.Uint32()
+	symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"})
+	res, err = c.PushV2(symbols2, histogramSeries)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+	testPushHeader(t, res.Header, "1", "1", "0")
+
+	symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"})
+	res, err = c.PushV2(symbols3, histogramFloatSeries)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+	testPushHeader(t, res.Header, "1", "1", "0")
+
+	testHistogramTimestamp := now.Add(blockRangePeriod * 2)
+	expectedHistogram := tsdbutil.GenerateTestHistogram(int(histogramIdx))
+	result, err = c.Query(`test_histogram`, testHistogramTimestamp)
+	require.NoError(t, err)
+	require.Equal(t, model.ValVector, result.Type())
+	v := result.(model.Vector)
+	require.Equal(t, 2, v.Len())
+	for _, s := range v {
+		require.NotNil(t, s.Histogram)
+		require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count))
+		require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum))
+	}
+}
+
+func TestExemplar(t *testing.T) {
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul := e2edb.NewConsulWithName("consul")
+	require.NoError(t, s.StartAndWaitReady(consul))
+
+	flags := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine":                                     blocksStorageEngine,
+			"-blocks-storage.backend":                           "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval":     "4m",
+			"-blocks-storage.bucket-store.sync-interval":        "15m",
+			"-blocks-storage.bucket-store.index-cache.backend":  tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled":           "true",
+			"-blocks-storage.tsdb.ship-interval":                "1s",
+			"-blocks-storage.tsdb.enable-native-histograms":     "true",
+			// Ingester.
+			"-ring.store":             "consul",
+			"-consul.hostname":        consul.NetworkHTTPEndpoint(),
+			"-ingester.max-exemplars": "100",
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	path := path.Join(s.SharedDir(), "cortex-1")
+
+	flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path})
+	// Start Cortex replicas.
+	cortex := e2ecortex.NewSingleBinary("cortex", flags, "")
+	require.NoError(t, s.StartAndWaitReady(cortex))
+
+	// Wait until Cortex replicas have updated the ring state.
+	require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total"))
+
+	c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	now := time.Now()
+	tsMillis := e2e.TimeToMilliseconds(now)
+
+	symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"}
+	timeseries := []writev2.TimeSeries{
+		{
+			LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
+			Metadata: writev2.Metadata{
+				Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries1Metadata.Type.
+
+				HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
+				UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
+			},
+			Samples:   []writev2.Sample{{Value: 1, Timestamp: tsMillis}},
+			Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: tsMillis}},
+		},
+	}
+
+	res, err := c.PushV2(symbols, timeseries)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+	testPushHeader(t, res.Header, "1", "0", "1")
+
+	start := time.Now().Add(-time.Minute)
+	end := now.Add(time.Minute)
+
+	exemplars, err := c.QueryExemplars("test_metric", start, end)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(exemplars))
+}
+
+func testPushHeader(t *testing.T, header http.Header, expectedSamples, expectedHistogram, expectedExemplars string) {
+	require.Equal(t, expectedSamples, header.Get("X-Prometheus-Remote-Write-Samples-Written"))
+	require.Equal(t, expectedHistogram, header.Get("X-Prometheus-Remote-Write-Histograms-Written"))
+	require.Equal(t, expectedExemplars, header.Get("X-Prometheus-Remote-Write-Exemplars-Written"))
+}
diff --git a/pkg/cortex/modules_test.go b/pkg/cortex/modules_test.go
index 7316e07274..ae5fc64582 100644
--- a/pkg/cortex/modules_test.go
+++ b/pkg/cortex/modules_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/weaveworks/common/server"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 func changeTargetConfig(c *Config) {
@@ -160,6 +161,10 @@ func TestCortex_InitRulerStorage(t *testing.T) {
 
 type myPusher struct{}
 
+func (p *myPusher) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	return nil, nil
+}
+
 func (p *myPusher) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	return nil, nil
 }
diff --git a/pkg/cortexpbv2/codecv2.go b/pkg/cortexpbv2/codecv2.go
new file mode 100644
index 0000000000..9fe7b434e0
--- /dev/null
+++ b/pkg/cortexpbv2/codecv2.go
@@ -0,0 +1,160 @@
+package cortexpbv2
+
+import (
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+// ToLabels returns model labels.Labels from the timeseries' remote label references.
+func (t TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+	return desymbolizeLabels(b, t.GetLabelsRefs(), symbols)
+}
+
+// ToLabels returns model labels.Labels from the exemplar's remote label references.
+func (e Exemplar) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+	return desymbolizeLabels(b, e.GetLabelsRefs(), symbols)
+}
+
+func (m Metadata) ToV1Metadata(name string, symbols []string) *cortexpb.MetricMetadata {
+	typ := cortexpb.UNKNOWN
+
+	switch m.Type {
+	case METRIC_TYPE_COUNTER:
+		typ = cortexpb.COUNTER
+	case METRIC_TYPE_GAUGE:
+		typ = cortexpb.GAUGE
+	case METRIC_TYPE_HISTOGRAM:
+		typ = cortexpb.HISTOGRAM
+	case METRIC_TYPE_GAUGEHISTOGRAM:
+		typ = cortexpb.GAUGEHISTOGRAM
+	case METRIC_TYPE_SUMMARY:
+		typ = cortexpb.SUMMARY
+	case METRIC_TYPE_INFO:
+		typ = cortexpb.INFO
+	case METRIC_TYPE_STATESET:
+		typ = cortexpb.STATESET
+	}
+
+	return &cortexpb.MetricMetadata{
+		Type:             typ,
+		MetricFamilyName: name,
+		Unit:             symbols[m.UnitRef],
+		Help:             symbols[m.HelpRef],
+	}
+}
+
+// desymbolizeLabels decodes label references with the given symbols into labels.
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+	b.Reset()
+	for i := 0; i < len(labelRefs); i += 2 {
+		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+	}
+	b.Sort()
+	return b.Labels()
+}
+
+// IsFloatHistogram returns true if the histogram is float.
+func (h Histogram) IsFloatHistogram() bool {
+	_, ok := h.GetCount().(*Histogram_CountFloat)
+	return ok
+}
+
+// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the provided proto message.
+func FloatHistogramProtoToFloatHistogram(h Histogram) *histogram.FloatHistogram {
+	if !h.IsFloatHistogram() {
+		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
+	}
+
+	return &histogram.FloatHistogram{
+		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        h.GetZeroCountFloat(),
+		Count:            h.GetCountFloat(),
+		Sum:              h.Sum,
+		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
+		PositiveBuckets:  h.GetPositiveCounts(),
+		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
+		NegativeBuckets:  h.GetNegativeCounts(),
+		CustomValues:     h.GetCustomValues(),
+	}
+}
+
+func HistogramProtoToHistogram(h Histogram) *histogram.Histogram {
+	if h.IsFloatHistogram() {
+		panic("HistogramProtoToHistogram called with a float histogram")
+	}
+
+	return &histogram.Histogram{
+		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        h.GetZeroCountInt(),
+		Count:            h.GetCountInt(),
+		Sum:              h.Sum,
+		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
+		PositiveBuckets:  h.GetPositiveDeltas(),
+		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
+		NegativeBuckets:  h.GetNegativeDeltas(),
+		CustomValues:     h.GetCustomValues(),
+	}
+}
+
+// HistogramToHistogramProto converts a (normal integer) Histogram to its protobuf message type.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723
+func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogram {
+	return Histogram{
+		Count:          &Histogram_CountInt{CountInt: h.Count},
+		Sum:            h.Sum,
+		Schema:         h.Schema,
+		ZeroThreshold:  h.ZeroThreshold,
+		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
+		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
+		NegativeDeltas: h.NegativeBuckets,
+		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
+		PositiveDeltas: h.PositiveBuckets,
+		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
+		Timestamp:      timestamp,
+		CustomValues:   h.CustomValues,
+	}
+}
+
+// FloatHistogramToHistogramProto converts a float Histogram to a normal
+// Histogram's protobuf message type.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739
+func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram {
+	return Histogram{
+		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
+		Sum:            fh.Sum,
+		Schema:         fh.Schema,
+		ZeroThreshold:  fh.ZeroThreshold,
+		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
+		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
+		NegativeCounts: fh.NegativeBuckets,
+		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
+		PositiveCounts: fh.PositiveBuckets,
+		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
+		Timestamp:      timestamp,
+		CustomValues:   fh.CustomValues,
+	}
+}
+
+func spansProtoToSpans(s []BucketSpan) []histogram.Span {
+	spans := make([]histogram.Span, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
+
+func spansToSpansProto(s []histogram.Span) []BucketSpan {
+	spans := make([]BucketSpan, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
diff --git a/pkg/cortexpbv2/compatv2.go b/pkg/cortexpbv2/compatv2.go
new file mode 100644
index 0000000000..13d42d000f
--- /dev/null
+++ b/pkg/cortexpbv2/compatv2.go
@@ -0,0 +1,88 @@
+package cortexpbv2
+
+import (
+	"github.com/prometheus/prometheus/model/labels"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+// ToWriteRequestV2 converts matched slices of Labels, Samples, Histograms, and Metadata into a WriteRequest proto.
+func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histogram, metadata []Metadata, source WriteRequest_SourceEnum, help ...string) *WriteRequest {
+	st := writev2.NewSymbolTable()
+	labelRefs := make([][]uint32, 0, len(lbls))
+	for _, lbl := range lbls {
+		labelRefs = append(labelRefs, st.SymbolizeLabels(lbl, nil))
+	}
+
+	for _, s := range help {
+		st.Symbolize(s)
+	}
+
+	symbols := st.Symbols()
+
+	req := &WriteRequest{
+		Timeseries: PreallocTimeseriesV2SliceFromPool(),
+		Symbols:    symbols,
+		Source:     source,
+	}
+
+	i := 0
+	for i < len(samples) || i < len(histograms) || i < len(metadata) {
+		ts := TimeseriesV2FromPool()
+		ts.LabelsRefs = labelRefs[i]
+		if i < len(samples) {
+			ts.Samples = append(ts.Samples, samples[i])
+		}
+		if i < len(histograms) {
+			ts.Histograms = append(ts.Histograms, histograms[i])
+		}
+		if i < len(metadata) {
+			ts.Metadata = metadata[i]
+		}
+		i++
+		req.Timeseries = append(req.Timeseries, PreallocTimeseriesV2{TimeSeries: ts})
+	}
+
+	return req
+}
+
+func GetLabelRefsFromLabelAdapters(symbols []string, las []cortexpb.LabelAdapter) []uint32 {
+	var ret []uint32
+
+	symbolMap := map[string]uint32{}
+	for idx, symbol := range symbols {
+		symbolMap[symbol] = uint32(idx)
+	}
+
+	for _, lb := range las {
+		if idx, ok := symbolMap[lb.Name]; ok {
+			ret = append(ret, idx)
+		}
+		if idx, ok := symbolMap[lb.Value]; ok {
+			ret = append(ret, idx)
+		}
+	}
+
+	return ret
+}
+
+func GetLabelsRefsFromLabels(symbols []string, lbs labels.Labels) []uint32 {
+	var ret []uint32
+
+	symbolMap := map[string]uint32{}
+	for idx, symbol := range symbols {
+		symbolMap[symbol] = uint32(idx)
+	}
+
+	for _, lb := range lbs {
+		if idx, ok := symbolMap[lb.Name]; ok {
+			ret = append(ret, idx)
+		}
+		if idx, ok := symbolMap[lb.Value]; ok {
+			ret = append(ret, idx)
+		}
+	}
+
+	return ret
+}
diff --git a/pkg/cortexpbv2/compatv2_test.go b/pkg/cortexpbv2/compatv2_test.go
new file mode 100644
index 0000000000..a6d809e0a4
--- /dev/null
+++ b/pkg/cortexpbv2/compatv2_test.go
@@ -0,0 +1,71 @@
+package cortexpbv2
+
+import (
+	"testing"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+// Test_GetLabelRefsFromLabelAdapters checks that label adapters are resolved
+// to flattened (name ref, value ref) index pairs against a symbols table.
+// Index 0 is reserved for the empty string per the remote write v2 convention.
+func Test_GetLabelRefsFromLabelAdapters(t *testing.T) {
+	tests := []struct {
+		symbols            []string
+		lbs                []cortexpb.LabelAdapter
+		expectedSeriesRefs []uint32
+	}{
+		{
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
+			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "foo", Value: "bar"}},
+			expectedSeriesRefs: []uint32{1, 2, 3, 4},
+		},
+		{
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
+			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}},
+			expectedSeriesRefs: []uint32{1, 2, 5, 6},
+		},
+		{
+			// The same symbol ("qux") may be referenced both as a value and as a name.
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux", "1"},
+			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}, {Name: "qux", Value: "1"}},
+			expectedSeriesRefs: []uint32{1, 2, 5, 6, 6, 7},
+		},
+	}
+
+	for _, test := range tests {
+		require.Equal(t, test.expectedSeriesRefs, GetLabelRefsFromLabelAdapters(test.symbols, test.lbs))
+	}
+}
+
+// Test_GetLabelsRefsFromLabels mirrors Test_GetLabelRefsFromLabelAdapters for
+// the labels.Labels variant, and additionally checks that an empty label set
+// yields a nil refs slice.
+func Test_GetLabelsRefsFromLabels(t *testing.T) {
+	tests := []struct {
+		symbols            []string
+		lbs                labels.Labels
+		expectedSeriesRefs []uint32
+	}{
+		{
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
+			lbs:                labels.Labels{labels.Label{Name: "__name__", Value: "test_metric"}, labels.Label{Name: "foo", Value: "bar"}},
+			expectedSeriesRefs: []uint32{1, 2, 3, 4},
+		},
+		{
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
+			lbs:                labels.Labels{labels.Label{Name: "__name__", Value: "test_metric"}, labels.Label{Name: "baz", Value: "qux"}},
+			expectedSeriesRefs: []uint32{1, 2, 5, 6},
+		},
+		{
+			// The same symbol ("qux") may be referenced both as a value and as a name.
+			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux", "1"},
+			lbs:                labels.Labels{labels.Label{Name: "__name__", Value: "test_metric"}, labels.Label{Name: "baz", Value: "qux"}, labels.Label{Name: "qux", Value: "1"}},
+			expectedSeriesRefs: []uint32{1, 2, 5, 6, 6, 7},
+		},
+		{
+			// No labels: symbols alone (e.g. help texts) produce no refs.
+			symbols:            []string{"", "a help for testmetric", "a help for testmetric2"},
+			lbs:                labels.Labels{},
+			expectedSeriesRefs: nil,
+		},
+	}
+
+	for _, test := range tests {
+		require.Equal(t, test.expectedSeriesRefs, GetLabelsRefsFromLabels(test.symbols, test.lbs))
+	}
+}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 8f6b97aa5d..5cd0c13e88 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -20,6 +20,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/weaveworks/common/httpgrpc"
@@ -28,6 +29,7 @@ import (
 	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ha"
 	"github.com/cortexproject/cortex/pkg/ingester"
 	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
@@ -44,7 +46,8 @@ import (
 )
 
 var (
-	emptyPreallocSeries = cortexpb.PreallocTimeseries{}
+	emptyPreallocSeriesV2 = cortexpbv2.PreallocTimeseriesV2{}
+	emptyPreallocSeries   = cortexpb.PreallocTimeseries{}
 
 	supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle}
 
@@ -513,6 +516,17 @@ func shardByAllLabels(userID string, labels []cortexpb.LabelAdapter) uint32 {
 	return h
 }
 
+// removeLabelV2 deletes the first label named labelName from lbls in place,
+// preserving the relative order of the remaining labels. It is a no-op when
+// the label is not present.
+func removeLabelV2(labelName string, labels *labels.Labels) {
+	ls := *labels
+	for i := range ls {
+		if ls[i].Name != labelName {
+			continue
+		}
+		*labels = append(ls[:i], ls[i+1:]...)
+		return
+	}
+}
+
 // Remove the label labelname from a slice of LabelPairs if it exists.
 func removeLabel(labelName string, labels *[]cortexpb.LabelAdapter) {
 	for i := 0; i < len(*labels); i++ {
@@ -617,6 +631,432 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri
 		nil
 }
 
+// prepareSeriesKeysV2 validates each series of a remote write v2 request and
+// computes its ring sharding token. It returns, in order: the per-series
+// tokens, the validated series, the counts of validated metadata / float
+// samples / histogram samples / exemplars, the first non-fatal (partial)
+// validation error, and any fatal error.
+func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.WriteRequest, userID string, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable, removeReplica bool) ([]uint32, []cortexpbv2.PreallocTimeseriesV2, int64, int64, int64, int64, error, error) {
+	pSpan, _ := opentracing.StartSpanFromContext(ctx, "prepareSeriesKeysV2")
+	defer pSpan.Finish()
+	// For each timeseries or samples, we compute a hash to distribute across ingesters;
+	// check each sample/metadata and discard if outside limits.
+	validatedTimeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(req.Timeseries))
+	seriesKeys := make([]uint32, 0, len(req.Timeseries))
+	validatedFloatSamples := 0
+	validatedHistogramSamples := 0
+	validatedExemplars := 0
+	validatedMetadata := 0
+
+	var firstPartialErr error
+
+	latestSampleTimestampMs := int64(0)
+	defer func() {
+		// Update this metric even in case of errors.
+		if latestSampleTimestampMs > 0 {
+			d.latestSeenSampleTimestampPerUser.WithLabelValues(userID).Set(float64(latestSampleTimestampMs) / 1000)
+		}
+	}()
+
+	// For each timeseries, compute a hash to distribute across ingesters;
+	// check each sample and discard if outside limits.
+	skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation()
+	for _, ts := range req.Timeseries {
+		// Use timestamp of latest sample in the series. If samples for series are not ordered, metric for user may be wrong.
+		if len(ts.Samples) > 0 {
+			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].Timestamp)
+		}
+		if len(ts.Histograms) > 0 {
+			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].Timestamp)
+		}
+
+		// Materialize the series labels from the request's symbols table.
+		lbs := ts.ToLabels(&b, req.Symbols)
+
+		if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 {
+			l, _ := relabel.Process(lbs, mrc...)
+			if len(l) == 0 {
+				// all labels are gone, samples will be discarded
+				d.validateMetrics.DiscardedSamples.WithLabelValues(
+					validation.DroppedByRelabelConfiguration,
+					userID,
+				).Add(float64(len(ts.Samples) + len(ts.Histograms)))
+
+				// all labels are gone, exemplars will be discarded
+				d.validateMetrics.DiscardedExemplars.WithLabelValues(
+					validation.DroppedByRelabelConfiguration,
+					userID,
+				).Add(float64(len(ts.Exemplars)))
+				continue
+			}
+			lbs = l
+		}
+
+		// If we found both the cluster and replica labels, we only want to include the cluster label when
+		// storing series in Cortex. If we kept the replica label we would end up with another series for the same
+		// series we're trying to dedupe when HA tracking moves over to a different replica.
+		if removeReplica {
+			removeLabelV2(limits.HAReplicaLabel, &lbs)
+		}
+
+		for _, labelName := range limits.DropLabels {
+			removeLabelV2(labelName, &lbs)
+		}
+		if len(lbs) == 0 {
+			d.validateMetrics.DiscardedSamples.WithLabelValues(
+				validation.DroppedByUserConfigurationOverride,
+				userID,
+			).Add(float64(len(ts.Samples) + len(ts.Histograms)))
+
+			d.validateMetrics.DiscardedExemplars.WithLabelValues(
+				validation.DroppedByUserConfigurationOverride,
+				userID,
+			).Add(float64(len(ts.Exemplars)))
+			continue
+		}
+
+		// update label refs
+		ts.LabelsRefs = st.SymbolizeLabels(lbs, nil)
+		las := cortexpb.FromLabelsToLabelAdapters(lbs)
+
+		// We rely on sorted labels in different places:
+		// 1) When computing token for labels, and sharding by all labels. Here different order of labels returns
+		// different tokens, which is bad.
+		// 2) In validation code, when checking for duplicate label names. As duplicate label names are rejected
+		// later in the validation phase, we ignore them here.
+		sortLabelsIfNeeded(las)
+
+		// Generate the sharding token based on the series labels without the HA replica
+		// label and dropped labels (if any)
+		seriesKey, err := d.tokenForLabels(userID, las)
+		if err != nil {
+			return nil, nil, 0, 0, 0, 0, nil, err
+		}
+
+		validatedSeries, validationErr := d.validateSeriesV2(ts, las, req.Symbols, userID, skipLabelNameValidation, limits, b, st)
+
+		// Errors in validation are considered non-fatal, as one series in a request may contain
+		// invalid data but all the remaining series could be perfectly valid.
+		if validationErr != nil && firstPartialErr == nil {
+			// The series labels may be retained by validationErr but that's not a problem for this
+			// use case because we format it calling Error() and then we discard it.
+			// Use an explicit "%s" verb so '%' characters inside label values are
+			// not misinterpreted as formatting directives (go vet printf).
+			firstPartialErr = httpgrpc.Errorf(http.StatusBadRequest, "%s", validationErr.Error())
+		}
+
+		if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+			// since metadata is attached, count only metadata that is not METRIC_TYPE_UNSPECIFIED.
+			validatedMetadata++
+		}
+
+		// validateSeriesV2 would have returned an emptyPreallocSeriesV2 if there were no valid samples.
+		if validatedSeries == emptyPreallocSeriesV2 {
+			continue
+		}
+
+		seriesKeys = append(seriesKeys, seriesKey)
+		validatedTimeseries = append(validatedTimeseries, validatedSeries)
+		validatedFloatSamples += len(ts.Samples)
+		validatedHistogramSamples += len(ts.Histograms)
+		validatedExemplars += len(ts.Exemplars)
+	}
+
+	return seriesKeys, validatedTimeseries, int64(validatedMetadata), int64(validatedFloatSamples), int64(validatedHistogramSamples), int64(validatedExemplars), firstPartialErr, nil
+}
+
+// doBatchV2 shards the validated series across ingesters in subRing, keyed by
+// the per-series ring tokens in keys, and pushes each shard via sendV2. The
+// request's timeseries slice is returned to the pool once the whole batch has
+// completed (ring.DoBatch's cleanup callback).
+func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteRequest, subRing ring.ReadRing, keys []uint32, validatedTimeseries []cortexpbv2.PreallocTimeseriesV2, userID string, stats *WriteStats) error {
+	span, _ := opentracing.StartSpanFromContext(ctx, "doBatchV2")
+	defer span.Finish()
+
+	// Use a background context to make sure all ingesters get samples even if we return early
+	localCtx, cancel := context.WithTimeout(context.Background(), d.cfg.RemoteTimeout)
+	localCtx = user.InjectOrgID(localCtx, userID)
+	if sp := opentracing.SpanFromContext(ctx); sp != nil {
+		localCtx = opentracing.ContextWithSpan(localCtx, sp)
+	}
+	// Get any HTTP headers that are supposed to be added to logs and add to localCtx for later use
+	if headerMap := util_log.HeaderMapFromContext(ctx); headerMap != nil {
+		localCtx = util_log.ContextWithHeaderMap(localCtx, headerMap)
+	}
+	// Get clientIP(s) from Context and add it to localCtx
+	source := util.GetSourceIPsFromOutgoingCtx(ctx)
+	localCtx = util.AddSourceIPsToOutgoingContext(localCtx, source)
+
+	op := ring.WriteNoExtend
+	if d.cfg.ExtendWrites {
+		op = ring.Write
+	}
+
+	// Each callback invocation sends the subset of series (selected by indexes)
+	// owned by one ingester; sends use localCtx so they are not canceled when
+	// the caller's ctx is.
+	return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
+		timeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(indexes))
+
+		for _, i := range indexes {
+			timeseries = append(timeseries, validatedTimeseries[i])
+		}
+
+		return d.sendV2(localCtx, req.Symbols, ingester, timeseries, req.Source, stats)
+	}, func() {
+		cortexpbv2.ReuseSlice(req.Timeseries)
+		cancel()
+	})
+}
+
+// sendV2 pushes one batch of series to a single ingester, updates the
+// per-ingester append/failure metrics, and folds the ingester's response
+// counters into stats. The pooled request is only recycled on success; on
+// error the slices may still be referenced by the in-flight RPC.
+func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester ring.InstanceDesc, timeseries []cortexpbv2.PreallocTimeseriesV2, source cortexpbv2.WriteRequest_SourceEnum, stats *WriteStats) error {
+	h, err := d.ingesterPool.GetClientFor(ingester.Addr)
+	if err != nil {
+		return err
+	}
+
+	// Best effort: metrics are still emitted (with an empty id) if the
+	// instance cannot be resolved in the ring.
+	id, err := d.ingestersRing.GetInstanceIdByAddr(ingester.Addr)
+	if err != nil {
+		level.Warn(d.log).Log("msg", "instance not found in the ring", "addr", ingester.Addr, "err", err)
+	}
+
+	c := h.(ingester_client.HealthAndIngesterClient)
+
+	req := cortexpbv2.PreallocWriteRequestV2FromPool()
+	req.Symbols = symbols
+	req.Timeseries = timeseries
+	req.Source = source
+
+	resp, err := c.PushPreAllocV2(ctx, req)
+	if err == nil {
+		cortexpbv2.ReuseWriteRequestV2(req)
+	}
+
+	if len(timeseries) > 0 {
+		d.ingesterAppends.WithLabelValues(id, typeSamples).Inc()
+		if err != nil {
+			d.ingesterAppendFailures.WithLabelValues(id, typeSamples, getErrorStatus(err)).Inc()
+		}
+
+		// In v2 metadata rides along with the series; count a metadata append
+		// if any series carries a concrete metric type.
+		metadataAppend := false
+		for _, ts := range timeseries {
+			if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+				metadataAppend = true
+				break
+			}
+		}
+		if metadataAppend {
+			d.ingesterAppends.WithLabelValues(id, typeMetadata).Inc()
+			if err != nil {
+				d.ingesterAppendFailures.WithLabelValues(id, typeMetadata, getErrorStatus(err)).Inc()
+			}
+		}
+	}
+
+	if resp != nil {
+		// track stats
+		stats.SetSamples(resp.Samples)
+		stats.SetHistograms(resp.Histograms)
+		stats.SetExemplars(resp.Exemplars)
+	}
+
+	return err
+}
+
+// Validates a single series from a write request. Will remove labels if
+// any are configured to be dropped for the user ID.
+// Returns the validated series with its labels/samples, and any error.
+// The returned error may retain the series labels.
+// Validation is all-or-nothing per series: the first invalid sample, exemplar,
+// histogram or metadata entry rejects the whole series (emptyPreallocSeriesV2).
+func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, seriesLabels []cortexpb.LabelAdapter, symbols []string, userID string, skipLabelNameValidation bool, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable) (cortexpbv2.PreallocTimeseriesV2, validation.ValidationError) {
+	// NOTE: observes the number of label refs (2x the label count), not the
+	// label count itself as the v1 path presumably does — confirm intended.
+	d.labelsHistogram.Observe(float64(len(ts.LabelsRefs)))
+
+	if err := validation.ValidateLabels(d.validateMetrics, limits, userID, seriesLabels, skipLabelNameValidation); err != nil {
+		return emptyPreallocSeriesV2, err
+	}
+
+	var samples []cortexpbv2.Sample
+	if len(ts.Samples) > 0 {
+		// Only alloc when data present
+		samples = make([]cortexpbv2.Sample, 0, len(ts.Samples))
+		for _, s := range ts.Samples {
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, s.Timestamp); err != nil {
+				return emptyPreallocSeriesV2, err
+			}
+			samples = append(samples, s)
+		}
+	}
+
+	var exemplars []cortexpbv2.Exemplar
+	if len(ts.Exemplars) > 0 {
+		// Only alloc when data present
+		exemplars = make([]cortexpbv2.Exemplar, 0, len(ts.Exemplars))
+		for _, e := range ts.Exemplars {
+			// &e aliases the loop copy; any in-place fix-ups made by the
+			// validator are captured by the append below.
+			if err := validation.ValidateExemplarV2(d.validateMetrics, symbols, userID, seriesLabels, &e, b, st); err != nil {
+				// An exemplar validation error prevents ingesting samples
+				// in the same series object. However, because the current Prometheus
+				// remote write implementation only populates one or the other,
+				// there never will be any.
+				return emptyPreallocSeriesV2, err
+			}
+			exemplars = append(exemplars, e)
+		}
+	}
+
+	var histograms []cortexpbv2.Histogram
+	if len(ts.Histograms) > 0 {
+		// Only alloc when data present
+		histograms = make([]cortexpbv2.Histogram, 0, len(ts.Histograms))
+		for i, h := range ts.Histograms {
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, h.Timestamp); err != nil {
+				return emptyPreallocSeriesV2, err
+			}
+			// Validation may convert the histogram (e.g. to a supported
+			// schema); store the converted form back before the bulk append.
+			convertedHistogram, err := validation.ValidateNativeHistogramV2(d.validateMetrics, limits, userID, seriesLabels, h)
+			if err != nil {
+				return emptyPreallocSeriesV2, err
+			}
+			ts.Histograms[i] = convertedHistogram
+		}
+		histograms = append(histograms, ts.Histograms...)
+	}
+
+	// validate metadata
+	err := validation.ValidateMetadataV2(d.validateMetrics, limits, userID, symbols, &ts.Metadata, st)
+	if err != nil {
+		return emptyPreallocSeriesV2, err
+	}
+
+	// Return a fresh TimeSeries (not the pooled input) holding the validated
+	// copies, so the request slices can be recycled independently.
+	return cortexpbv2.PreallocTimeseriesV2{
+		TimeSeries: &cortexpbv2.TimeSeries{
+			LabelsRefs: ts.LabelsRefs,
+			Samples:    samples,
+			Exemplars:  exemplars,
+			Histograms: histograms,
+			Metadata:   ts.Metadata,
+		},
+	}, nil
+}
+
+// PushV2 implements the remote write v2 push path: it enforces instance and
+// per-tenant limits, performs HA deduplication, validates and shards the
+// series, and fans them out to the ingesters. It returns the aggregated
+// ingestion counters and the first partial (per-series) validation error,
+// if any.
+func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	userID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.PushV2")
+	defer span.Finish()
+
+	// We will report *this* request in the error too.
+	inflight := d.inflightPushRequests.Inc()
+	defer d.inflightPushRequests.Dec()
+
+	now := time.Now()
+	d.activeUsers.UpdateUserTimestamp(userID, now)
+
+	numFloatSamples := 0
+	numHistogramSamples := 0
+	numExemplars := 0
+	for _, ts := range req.Timeseries {
+		numFloatSamples += len(ts.Samples)
+		numHistogramSamples += len(ts.Histograms)
+		numExemplars += len(ts.Exemplars)
+	}
+
+	// Count the total samples, exemplars in, prior to validation or deduplication, for comparison with other metrics.
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(numFloatSamples))
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(numHistogramSamples))
+	d.incomingExemplars.WithLabelValues(userID).Add(float64(numExemplars))
+	// Metadata is attached to each series.
+	d.incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Timeseries)))
+
+	if d.cfg.InstanceLimits.MaxInflightPushRequests > 0 && inflight > int64(d.cfg.InstanceLimits.MaxInflightPushRequests) {
+		return nil, errTooManyInflightPushRequests
+	}
+
+	if d.cfg.InstanceLimits.MaxIngestionRate > 0 {
+		if rate := d.ingestionRate.Rate(); rate >= d.cfg.InstanceLimits.MaxIngestionRate {
+			return nil, errMaxSamplesPushRateLimitReached
+		}
+	}
+
+	b := labels.NewScratchBuilder(0)
+	removeReplica := false
+	// Cache user limit with overrides so we spend less CPU doing locking. See issue #4904
+	limits := d.limits.GetOverridesForUser(userID)
+
+	if limits.AcceptHASamples && len(req.Timeseries) > 0 {
+		// HA labels are looked up on the first series only; presumably all
+		// series of one request share the same cluster/replica labels.
+		cluster, replica := findHALabels(limits.HAReplicaLabel, limits.HAClusterLabel, cortexpb.FromLabelsToLabelAdapters(req.Timeseries[0].ToLabels(&b, req.Symbols)))
+		removeReplica, err = d.checkSample(ctx, userID, cluster, replica, limits)
+		if err != nil {
+			// Ensure the request slice is reused if the series get deduped.
+			cortexpbv2.ReuseSlice(req.Timeseries)
+
+			if errors.Is(err, ha.ReplicasNotMatchError{}) {
+				// These samples have been deduped.
+				d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numFloatSamples + numHistogramSamples))
+				// Use "%s" so '%' characters in the error text are not
+				// misinterpreted as format verbs (go vet printf).
+				return nil, httpgrpc.Errorf(http.StatusAccepted, "%s", err.Error())
+			}
+
+			if errors.Is(err, ha.TooManyReplicaGroupsError{}) {
+				d.validateMetrics.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numFloatSamples + numHistogramSamples))
+				// Use "%s" so '%' characters in the error text are not
+				// misinterpreted as format verbs (go vet printf).
+				return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
+			}
+
+			return nil, err
+		}
+		// If there wasn't an error but removeReplica is false that means we didn't find both HA labels.
+		if !removeReplica { // False, Nil
+			d.nonHASamples.WithLabelValues(userID).Add(float64(numFloatSamples + numHistogramSamples))
+		}
+	}
+
+	// Validation rewrites the label refs, so the request symbols must be
+	// replaced with the freshly-built table afterwards.
+	st := writev2.NewSymbolTable()
+	seriesKeys, validatedTimeseries, validatedMetadatas, validatedFloatSamples, validatedHistogramSamples, validatedExemplars, firstPartialErr, err := d.prepareSeriesKeysV2(ctx, req, userID, limits, b, &st, removeReplica)
+	if err != nil {
+		return nil, err
+	}
+	req.Symbols = st.Symbols()
+
+	d.receivedSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(validatedFloatSamples))
+	d.receivedSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(validatedHistogramSamples))
+	d.receivedExemplars.WithLabelValues(userID).Add(float64(validatedExemplars))
+	// Metadata is attached to each series
+	d.receivedMetadata.WithLabelValues(userID).Add(float64(validatedMetadatas))
+
+	if len(seriesKeys) == 0 {
+		// Ensure the request slice is reused if there's no series or metadata passing the validation.
+		cortexpbv2.ReuseSlice(req.Timeseries)
+
+		return &cortexpbv2.WriteResponse{}, firstPartialErr
+	}
+
+	totalSamples := validatedFloatSamples + validatedHistogramSamples
+	totalN := totalSamples + validatedExemplars + validatedMetadatas
+	if !d.ingestionRateLimiter.AllowN(now, userID, int(totalN)) {
+		// Ensure the request slice is reused if the request is rate limited.
+		cortexpbv2.ReuseSlice(req.Timeseries)
+
+		d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(totalSamples))
+		d.validateMetrics.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars))
+		d.validateMetrics.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedMetadatas))
+		// Return a 429 here to tell the client it is going too fast.
+		// Client may discard the data or slow down and re-send.
+		// Prometheus v2.26 added a remote-write option 'retry_on_http_429'.
+		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), totalSamples, validatedMetadatas)
+	}
+
+	// totalN included samples and metadata. Ingester follows this pattern when computing its ingestion rate.
+	d.ingestionRate.Add(totalN)
+
+	subRing := d.ingestersRing
+
+	// Obtain a subring if required.
+	if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle {
+		subRing = d.ingestersRing.ShuffleShard(userID, limits.IngestionTenantShardSize)
+	}
+
+	keys := seriesKeys
+
+	s := WriteStats{}
+
+	err = d.doBatchV2(ctx, req, subRing, keys, validatedTimeseries, userID, &s)
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &cortexpbv2.WriteResponse{
+		Samples:    s.LoadSamples(),
+		Histograms: s.LoadHistogram(),
+		Exemplars:  s.LoadExemplars(),
+	}
+
+	return resp, firstPartialErr
+}
+
 // Push implements client.IngesterServer
 func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	userID, err := tenant.TenantID(ctx)
diff --git a/pkg/distributor/distributor_prw2_test.go b/pkg/distributor/distributor_prw2_test.go
new file mode 100644
index 0000000000..80f8d92f70
--- /dev/null
+++ b/pkg/distributor/distributor_prw2_test.go
@@ -0,0 +1,2525 @@
+package distributor
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/weaveworks/common/httpgrpc"
+	"github.com/weaveworks/common/user"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
+	"github.com/cortexproject/cortex/pkg/ingester"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring"
+	ring_client "github.com/cortexproject/cortex/pkg/ring/client"
+	"github.com/cortexproject/cortex/pkg/ring/kv"
+	"github.com/cortexproject/cortex/pkg/ring/kv/consul"
+	"github.com/cortexproject/cortex/pkg/tenant"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/chunkcompat"
+	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/cortexproject/cortex/pkg/util/limiter"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/test"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+)
+
+var (
+	// emptyResponseV2 is the canonical empty write response expected from a
+	// successful PushV2 that ingested nothing to compare against in tests.
+	emptyResponseV2 = &cortexpbv2.WriteResponse{}
+)
+
+// TestDistributorPRW2_Push_LabelRemoval_RemovingNameLabelWillError verifies
+// that dropping __name__ via the DropLabels limit makes PushV2 reject the
+// series with a 400 "sample missing metric name" error.
+func TestDistributorPRW2_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) {
+	t.Parallel()
+	ctx := user.InjectOrgID(context.Background(), "user")
+	type testcase struct {
+		inputSeries    labels.Labels
+		expectedSeries labels.Labels
+		removeReplica  bool
+		removeLabels   []string
+	}
+
+	tc := testcase{
+		removeReplica: true,
+		removeLabels:  []string{"__name__"},
+		inputSeries: labels.Labels{
+			{Name: "__name__", Value: "some_metric"},
+			{Name: "cluster", Value: "one"},
+			{Name: "__replica__", Value: "two"},
+		},
+		expectedSeries: labels.Labels{},
+	}
+
+	var err error
+	var limits validation.Limits
+	flagext.DefaultValues(&limits)
+	limits.DropLabels = tc.removeLabels
+	limits.AcceptHASamples = tc.removeReplica
+
+	ds, _, _, _ := prepare(t, prepConfig{
+		numIngesters:     2,
+		happyIngesters:   2,
+		numDistributors:  1,
+		shardByAllLabels: true,
+		limits:           &limits,
+	})
+
+	// Push the series to the distributor
+	req := mockWriteRequestV2([]labels.Labels{tc.inputSeries}, 1, 1, false)
+	_, err = ds[0].PushV2(ctx, req)
+	require.Error(t, err)
+	assert.Equal(t, "rpc error: code = Code(400) desc = sample missing metric name", err.Error())
+}
+
+// TestDistributorPRW2_Push_LabelRemoval verifies that HA replica and
+// DropLabels removal is applied to v2 pushes, that fully-stripped series are
+// counted as discarded (samples and exemplars), and that ingesters receive
+// the expected post-removal label sets. Each case runs with and without
+// native histograms.
+func TestDistributorPRW2_Push_LabelRemoval(t *testing.T) {
+	t.Parallel()
+	ctx := user.InjectOrgID(context.Background(), "user")
+
+	type testcase struct {
+		inputSeries    labels.Labels
+		expectedSeries labels.Labels
+		removeReplica  bool
+		removeLabels   []string
+		exemplars      []cortexpbv2.Exemplar
+	}
+
+	cases := []testcase{
+		// Remove both cluster and replica label.
+		{
+			removeReplica: true,
+			removeLabels:  []string{"cluster"},
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+				{Name: "cluster", Value: "one"},
+				{Name: "__replica__", Value: "two"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+			},
+		},
+		// Remove multiple labels and replica.
+		{
+			removeReplica: true,
+			removeLabels:  []string{"foo", "some"},
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+				{Name: "cluster", Value: "one"},
+				{Name: "__replica__", Value: "two"},
+				{Name: "foo", Value: "bar"},
+				{Name: "some", Value: "thing"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+				{Name: "cluster", Value: "one"},
+			},
+		},
+		// Don't remove any labels.
+		{
+			removeReplica: false,
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+				{Name: "__replica__", Value: "two"},
+				{Name: "cluster", Value: "one"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "some_metric"},
+				{Name: "__replica__", Value: "two"},
+				{Name: "cluster", Value: "one"},
+			},
+		},
+		// No labels left.
+		{
+			removeReplica: true,
+			removeLabels:  []string{"cluster"},
+			inputSeries: labels.Labels{
+				{Name: "cluster", Value: "one"},
+				{Name: "__replica__", Value: "two"},
+			},
+			expectedSeries: labels.Labels{},
+			exemplars: []cortexpbv2.Exemplar{
+				{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0},
+				{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0},
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		for _, histogram := range []bool{true, false} {
+			var err error
+			var limits validation.Limits
+			flagext.DefaultValues(&limits)
+			limits.DropLabels = tc.removeLabels
+			limits.AcceptHASamples = tc.removeReplica
+
+			expectedDiscardedSamples := 0
+			expectedDiscardedExemplars := 0
+			if tc.expectedSeries.Len() == 0 {
+				expectedDiscardedSamples = 1
+				expectedDiscardedExemplars = len(tc.exemplars)
+				// Allow series with no labels to ingest
+				limits.EnforceMetricName = false
+			}
+
+			ds, ingesters, _, _ := prepare(t, prepConfig{
+				numIngesters:     2,
+				happyIngesters:   2,
+				numDistributors:  1,
+				shardByAllLabels: true,
+				limits:           &limits,
+			})
+
+			// Push the series to the distributor
+			req := mockWriteRequestV2([]labels.Labels{tc.inputSeries}, 1, 1, histogram)
+			req.Timeseries[0].Exemplars = tc.exemplars
+			_, err = ds[0].PushV2(ctx, req)
+			require.NoError(t, err)
+
+			actualDiscardedSamples := testutil.ToFloat64(ds[0].validateMetrics.DiscardedSamples.WithLabelValues(validation.DroppedByUserConfigurationOverride, "user"))
+			actualDiscardedExemplars := testutil.ToFloat64(ds[0].validateMetrics.DiscardedExemplars.WithLabelValues(validation.DroppedByUserConfigurationOverride, "user"))
+			require.Equal(t, float64(expectedDiscardedSamples), actualDiscardedSamples)
+			require.Equal(t, float64(expectedDiscardedExemplars), actualDiscardedExemplars)
+
+			// Since each test pushes only 1 series, we do expect the ingester
+			// to have received exactly 1 series
+			for i := range ingesters {
+				timeseries := ingesters[i].series()
+				expectedSeries := 1
+				if tc.expectedSeries.Len() == 0 {
+					expectedSeries = 0
+				}
+				assert.Equal(t, expectedSeries, len(timeseries))
+				for _, v := range timeseries {
+					assert.Equal(t, tc.expectedSeries, cortexpb.FromLabelAdaptersToLabels(v.Labels))
+				}
+			}
+		}
+	}
+}
+
+// TestDistributorPRW2_PushHAInstances exercises HA deduplication on the v2
+// push path: accepted replicas ingest, non-elected replicas get a 202,
+// over-long replica label values get a 400, and a disabled tracker accepts
+// everything. Each case runs across shardByAllLabels x histogram variants.
+func TestDistributorPRW2_PushHAInstances(t *testing.T) {
+	t.Parallel()
+	ctx := user.InjectOrgID(context.Background(), "user")
+
+	for i, tc := range []struct {
+		enableTracker    bool
+		acceptedReplica  string
+		testReplica      string
+		cluster          string
+		samples          int
+		expectedResponse *cortexpbv2.WriteResponse
+		expectedCode     int32
+	}{
+		{
+			enableTracker:    true,
+			acceptedReplica:  "instance0",
+			testReplica:      "instance0",
+			cluster:          "cluster0",
+			samples:          5,
+			expectedResponse: emptyResponseV2,
+		},
+		// The 202 indicates that we didn't accept this sample.
+		{
+			enableTracker:   true,
+			acceptedReplica: "instance2",
+			testReplica:     "instance0",
+			cluster:         "cluster0",
+			samples:         5,
+			expectedCode:    202,
+		},
+		// If the HA tracker is disabled we should still accept samples that have both labels.
+		{
+			enableTracker:    false,
+			acceptedReplica:  "instance0",
+			testReplica:      "instance0",
+			cluster:          "cluster0",
+			samples:          5,
+			expectedResponse: emptyResponseV2,
+		},
+		// Using very long replica label value results in validation error.
+		{
+			enableTracker:    true,
+			acceptedReplica:  "instance0",
+			testReplica:      "instance1234567890123456789012345678901234567890",
+			cluster:          "cluster0",
+			samples:          5,
+			expectedResponse: emptyResponseV2,
+			expectedCode:     400,
+		},
+	} {
+		for _, shardByAllLabels := range []bool{true, false} {
+			tc := tc
+			shardByAllLabels := shardByAllLabels
+			for _, enableHistogram := range []bool{true, false} {
+				enableHistogram := enableHistogram
+				t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) {
+					t.Parallel()
+					var limits validation.Limits
+					flagext.DefaultValues(&limits)
+					limits.AcceptHASamples = true
+					limits.MaxLabelValueLength = 15
+
+					ds, _, _, _ := prepare(t, prepConfig{
+						numIngesters:     3,
+						happyIngesters:   3,
+						numDistributors:  1,
+						shardByAllLabels: shardByAllLabels,
+						limits:           &limits,
+						enableTracker:    tc.enableTracker,
+					})
+
+					d := ds[0]
+
+					userID, err := tenant.TenantID(ctx)
+					assert.NoError(t, err)
+					// Pre-elect the accepted replica so the push below is
+					// judged against a known leader.
+					err = d.HATracker.CheckReplica(ctx, userID, tc.cluster, tc.acceptedReplica, time.Now())
+					assert.NoError(t, err)
+
+					request := makeWriteRequestHAV2(tc.samples, tc.testReplica, tc.cluster, enableHistogram)
+					response, err := d.PushV2(ctx, request)
+					assert.Equal(t, tc.expectedResponse, response)
+
+					httpResp, ok := httpgrpc.HTTPResponseFromError(err)
+					if ok {
+						assert.Equal(t, tc.expectedCode, httpResp.Code)
+					} else if tc.expectedCode != 0 {
+						assert.Fail(t, "expected HTTP status code", tc.expectedCode)
+					}
+				})
+			}
+		}
+	}
+}
+
+// BenchmarkDistributorPRW2_Push benchmarks Distributor.PushV2 (the remote
+// write v2 path) against a ring containing a single no-op ingester. Each
+// sub-benchmark prepares limits and a fixed set of series up front, then
+// pushes the same request b.N times, asserting the expected (or absent)
+// error substring on every iteration.
+func BenchmarkDistributorPRW2_Push(b *testing.B) {
+	const (
+		numSeriesPerRequest = 1000
+	)
+	ctx := user.InjectOrgID(context.Background(), "user")
+
+	tests := map[string]struct {
+		prepareConfig func(limits *validation.Limits) // mutates limits before the distributor is built
+		prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample)
+		expectedErr   string // substring expected in the PushV2 error; empty means no error expected
+	}{
+		"all samples successfully pushed": {
+			prepareConfig: func(limits *validation.Limits) {},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					// NOTE: the inner i deliberately shadows the outer series index;
+					// every series gets the same 10 extra labels.
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "",
+		},
+		"ingestion rate limit reached": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.IngestionRate = 1
+				limits.IngestionBurstSize = 1
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "ingestion rate limit",
+		},
+		"too many labels limit reached": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.MaxLabelNamesPerSeries = 30
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					// 30 extra labels plus __name__ exceed the 30-label limit set above.
+					for i := 1; i < 31; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "series has too many labels",
+		},
+		"max label name length limit reached": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.MaxLabelNameLength = 1024
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					// Add a label with a very long name (2000+ chars, beyond the 1024 limit).
+					lbls.Set(fmt.Sprintf("xxx_%0.2000d", 1), "xxx")
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "label name too long",
+		},
+		"max label value length limit reached": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.MaxLabelValueLength = 1024
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					// Add a label with a very long value (2000+ chars, beyond the 1024 limit).
+					lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1))
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "label value too long",
+		},
+		"max label size bytes per series limit reached": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.MaxLabelsSizeBytes = 1024
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					// Add a label with a very long value.
+					lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1))
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "labels size bytes exceeded",
+		},
+		"timestamp too old": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.RejectOldSamples = true
+				limits.RejectOldSamplesMaxAge = model.Duration(time.Hour)
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					// Two hours in the past, i.e. older than the 1h RejectOldSamplesMaxAge.
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "timestamp too old",
+		},
+		"timestamp too new": {
+			prepareConfig: func(limits *validation.Limits) {
+				limits.CreationGracePeriod = model.Duration(time.Minute)
+			},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					// One hour in the future, i.e. beyond the 1m CreationGracePeriod.
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			expectedErr: "timestamp too new",
+		},
+	}
+
+	// Token generator used to register the single benchmark ingester in the ring.
+	tg := ring.NewRandomTokenGenerator()
+
+	for testName, testData := range tests {
+		b.Run(testName, func(b *testing.B) {
+
+			// Create an in-memory KV store for the ring with 1 ingester registered.
+			kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
+			b.Cleanup(func() { assert.NoError(b, closer.Close()) })
+
+			err := kvStore.CAS(context.Background(), ingester.RingKey,
+				func(_ interface{}) (interface{}, bool, error) {
+					d := &ring.Desc{}
+					d.AddIngester("ingester-1", "127.0.0.1", "", tg.GenerateTokens(d, "ingester-1", "", 128, true), ring.ACTIVE, time.Now())
+					return d, true, nil
+				},
+			)
+			require.NoError(b, err)
+
+			ingestersRing, err := ring.New(ring.Config{
+				KVStore:           kv.Config{Mock: kvStore},
+				HeartbeatTimeout:  60 * time.Minute,
+				ReplicationFactor: 1,
+			}, ingester.RingKey, ingester.RingKey, nil, nil)
+			require.NoError(b, err)
+			require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingestersRing))
+			b.Cleanup(func() {
+				require.NoError(b, services.StopAndAwaitTerminated(context.Background(), ingestersRing))
+			})
+
+			// Wait until the ring has seen the registered ingester.
+			test.Poll(b, time.Second, 1, func() interface{} {
+				return ingestersRing.InstancesCount()
+			})
+
+			// Prepare the distributor configuration.
+			var distributorCfg Config
+			var clientConfig client.Config
+			limits := validation.Limits{}
+			flagext.DefaultValues(&distributorCfg, &clientConfig, &limits)
+
+			limits.IngestionRate = 10000000 // Unlimited.
+			testData.prepareConfig(&limits)
+
+			distributorCfg.ShardByAllLabels = true
+			// Ingester responses are irrelevant to the benchmark, so every client is a no-op.
+			distributorCfg.IngesterClientFactory = func(addr string) (ring_client.PoolClient, error) {
+				return &noopIngester{}, nil
+			}
+
+			overrides, err := validation.NewOverrides(limits, nil)
+			require.NoError(b, err)
+
+			// Start the distributor.
+			distributor, err := New(distributorCfg, clientConfig, overrides, ingestersRing, true, prometheus.NewRegistry(), log.NewNopLogger())
+			require.NoError(b, err)
+			require.NoError(b, services.StartAndAwaitRunning(context.Background(), distributor))
+
+			b.Cleanup(func() {
+				require.NoError(b, services.StopAndAwaitTerminated(context.Background(), distributor))
+			})
+
+			// Prepare the series to remote write before starting the benchmark.
+			metrics, samples := testData.prepareSeries()
+
+			// Run the benchmark.
+			b.ReportAllocs()
+			b.ResetTimer()
+
+			for n := 0; n < b.N; n++ {
+				_, err := distributor.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+				if testData.expectedErr == "" && err != nil {
+					b.Fatalf("no error expected but got %v", err)
+				}
+				if testData.expectedErr != "" && (err == nil || !strings.Contains(err.Error(), testData.expectedErr)) {
+					b.Fatalf("expected %v error but got %v", testData.expectedErr, err)
+				}
+			}
+		})
+	}
+}
+
+// TestDistributorPRW2_Push exercises Distributor.PushV2 against varying
+// numbers of happy/unhappy ingesters, for both float samples and native
+// histograms, asserting the returned response/error and — where given —
+// the exact Prometheus metrics the distributor exposes afterwards.
+// Every case runs twice: with and without shard-by-all-labels.
+func TestDistributorPRW2_Push(t *testing.T) {
+	t.Parallel()
+	// Metrics to assert on.
+	lastSeenTimestamp := "cortex_distributor_latest_seen_sample_timestamp_seconds"
+	distributorAppend := "cortex_distributor_ingester_appends_total"
+	distributorAppendFailure := "cortex_distributor_ingester_append_failures_total"
+	distributorReceivedSamples := "cortex_distributor_received_samples_total"
+	ctx := user.InjectOrgID(context.Background(), "userDistributorPush")
+
+	type samplesIn struct {
+		num              int   // number of samples to put in the write request
+		startTimestampMs int64 // timestamp (ms) of the first sample
+	}
+	for name, tc := range map[string]struct {
+		metricNames      []string // metrics to gather and compare against expectedMetrics
+		numIngesters     int
+		happyIngesters   int
+		samples          samplesIn
+		histogramSamples bool // when true, push native histograms instead of float samples
+		metadata         int
+		expectedResponse *cortexpbv2.WriteResponse
+		expectedError    error
+		expectedMetrics  string
+		ingesterError    error // error returned by the unhappy ingesters
+	}{
+		"A push of no samples shouldn't block or return error, even if ingesters are sad": {
+			numIngesters:     3,
+			happyIngesters:   0,
+			expectedResponse: emptyResponseV2,
+		},
+		"A push to 3 happy ingesters should succeed": {
+			numIngesters:     3,
+			happyIngesters:   3,
+			samples:          samplesIn{num: 5, startTimestampMs: 123456789000},
+			metadata:         5,
+			expectedResponse: emptyResponseV2,
+			metricNames:      []string{lastSeenTimestamp},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004
+			`,
+		},
+		"A push to 2 happy ingesters should succeed": {
+			numIngesters:     3,
+			happyIngesters:   2,
+			samples:          samplesIn{num: 5, startTimestampMs: 123456789000},
+			metadata:         5,
+			expectedResponse: emptyResponseV2,
+			metricNames:      []string{lastSeenTimestamp},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004
+			`,
+		},
+		"A push to 1 happy ingesters should fail": {
+			numIngesters:   3,
+			happyIngesters: 1,
+			samples:        samplesIn{num: 10, startTimestampMs: 123456789000},
+			expectedError:  errFail,
+			metricNames:    []string{lastSeenTimestamp},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009
+			`,
+		},
+		"A push to 0 happy ingesters should fail": {
+			numIngesters:   3,
+			happyIngesters: 0,
+			samples:        samplesIn{num: 10, startTimestampMs: 123456789000},
+			expectedError:  errFail,
+			metricNames:    []string{lastSeenTimestamp},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009
+			`,
+		},
+		"A push exceeding burst size should fail": {
+			numIngesters:   3,
+			happyIngesters: 3,
+			samples:        samplesIn{num: 25, startTimestampMs: 123456789000},
+			metadata:       5,
+			expectedError:  httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (20) exceeded while adding 25 samples and 5 metadata"),
+			metricNames:    []string{lastSeenTimestamp},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.024
+			`,
+		},
+		"A push to ingesters should report the correct metrics with no metadata": {
+			numIngesters:     3,
+			happyIngesters:   2,
+			samples:          samplesIn{num: 1, startTimestampMs: 123456789000},
+			metadata:         0,
+			metricNames:      []string{distributorAppend, distributorAppendFailure},
+			expectedResponse: emptyResponseV2,
+			expectedMetrics: `
+				# HELP cortex_distributor_ingester_append_failures_total The total number of failed batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_append_failures_total counter
+				cortex_distributor_ingester_append_failures_total{ingester="ingester-2",status="5xx",type="samples"} 1
+				# HELP cortex_distributor_ingester_appends_total The total number of batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_appends_total counter
+				cortex_distributor_ingester_appends_total{ingester="ingester-0",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-1",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-2",type="samples"} 1
+			`,
+		},
+		"A push to ingesters should report samples and metadata metrics with no samples": {
+			numIngesters:     3,
+			happyIngesters:   2,
+			samples:          samplesIn{num: 0, startTimestampMs: 123456789000},
+			metadata:         1,
+			metricNames:      []string{distributorAppend, distributorAppendFailure},
+			expectedResponse: emptyResponseV2,
+			ingesterError:    httpgrpc.Errorf(http.StatusInternalServerError, "Fail"),
+			expectedMetrics: `
+				# HELP cortex_distributor_ingester_append_failures_total The total number of failed batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_append_failures_total counter
+				cortex_distributor_ingester_append_failures_total{ingester="ingester-2",status="5xx",type="metadata"} 1
+				cortex_distributor_ingester_append_failures_total{ingester="ingester-2",status="5xx",type="samples"} 1
+				# HELP cortex_distributor_ingester_appends_total The total number of batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_appends_total counter
+				cortex_distributor_ingester_appends_total{ingester="ingester-0",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-1",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-2",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-0",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-1",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-2",type="samples"} 1
+			`,
+		},
+		"A push to overloaded ingesters should report the correct metrics": {
+			numIngesters:     3,
+			happyIngesters:   2,
+			samples:          samplesIn{num: 0, startTimestampMs: 123456789000},
+			metadata:         1,
+			metricNames:      []string{distributorAppend, distributorAppendFailure},
+			expectedResponse: emptyResponseV2,
+			ingesterError:    httpgrpc.Errorf(http.StatusTooManyRequests, "Fail"),
+			expectedMetrics: `
+				# HELP cortex_distributor_ingester_appends_total The total number of batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_appends_total counter
+				cortex_distributor_ingester_appends_total{ingester="ingester-0",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-1",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-2",type="metadata"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-0",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-1",type="samples"} 1
+				cortex_distributor_ingester_appends_total{ingester="ingester-2",type="samples"} 1
+				# HELP cortex_distributor_ingester_append_failures_total The total number of failed batch appends sent to ingesters.
+				# TYPE cortex_distributor_ingester_append_failures_total counter
+				cortex_distributor_ingester_append_failures_total{ingester="ingester-2",status="4xx",type="metadata"} 1
+				cortex_distributor_ingester_append_failures_total{ingester="ingester-2",status="4xx",type="samples"} 1
+			`,
+		},
+		"A push to 3 happy ingesters should succeed, histograms": {
+			numIngesters:     3,
+			happyIngesters:   3,
+			samples:          samplesIn{num: 5, startTimestampMs: 123456789000},
+			histogramSamples: true,
+			metadata:         5,
+			expectedResponse: emptyResponseV2,
+			metricNames:      []string{lastSeenTimestamp, distributorReceivedSamples},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004
+				# HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples.
+				# TYPE cortex_distributor_received_samples_total counter
+				cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0
+				cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 5
+			`,
+		},
+		"A push to 2 happy ingesters should succeed, histograms": {
+			numIngesters:     3,
+			happyIngesters:   2,
+			samples:          samplesIn{num: 5, startTimestampMs: 123456789000},
+			histogramSamples: true,
+			metadata:         5,
+			expectedResponse: emptyResponseV2,
+			metricNames:      []string{lastSeenTimestamp, distributorReceivedSamples},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004
+				# HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples.
+				# TYPE cortex_distributor_received_samples_total counter
+				cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0
+				cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 5
+			`,
+		},
+		"A push to 1 happy ingesters should fail, histograms": {
+			numIngesters:     3,
+			happyIngesters:   1,
+			samples:          samplesIn{num: 10, startTimestampMs: 123456789000},
+			histogramSamples: true,
+			expectedError:    errFail,
+			metricNames:      []string{lastSeenTimestamp, distributorReceivedSamples},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009
+				# HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples.
+				# TYPE cortex_distributor_received_samples_total counter
+				cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0
+				cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 10
+			`,
+		},
+		"A push exceeding burst size should fail, histograms": {
+			numIngesters:     3,
+			happyIngesters:   3,
+			samples:          samplesIn{num: 25, startTimestampMs: 123456789000},
+			histogramSamples: true,
+			metadata:         5,
+			expectedError:    httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (20) exceeded while adding 25 samples and 5 metadata"),
+			metricNames:      []string{lastSeenTimestamp, distributorReceivedSamples},
+			expectedMetrics: `
+				# HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user.
+				# TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge
+				cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.024
+				# HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples.
+				# TYPE cortex_distributor_received_samples_total counter
+				cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0
+				cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 25
+			`,
+		},
+	} {
+		for _, shardByAllLabels := range []bool{true, false} {
+			// Capture range variables for the parallel subtests (pre-Go 1.22 semantics).
+			tc := tc
+			name := name
+			shardByAllLabels := shardByAllLabels
+			t.Run(fmt.Sprintf("[%s](shardByAllLabels=%v)", name, shardByAllLabels), func(t *testing.T) {
+				t.Parallel()
+				limits := &validation.Limits{}
+				flagext.DefaultValues(limits)
+				limits.IngestionRate = 20
+				limits.IngestionBurstSize = 20
+
+				ds, _, regs, _ := prepare(t, prepConfig{
+					numIngesters:     tc.numIngesters,
+					happyIngesters:   tc.happyIngesters,
+					numDistributors:  1,
+					shardByAllLabels: shardByAllLabels,
+					limits:           limits,
+					errFail:          tc.ingesterError,
+				})
+
+				var request *cortexpbv2.WriteRequest
+				if !tc.histogramSamples {
+					request = makeWriteRequestV2WithSamples(tc.samples.startTimestampMs, tc.samples.num, tc.metadata)
+				} else {
+					request = makeWriteRequestV2WithHistogram(tc.samples.startTimestampMs, tc.samples.num, tc.metadata)
+				}
+
+				response, err := ds[0].PushV2(ctx, request)
+				assert.Equal(t, tc.expectedResponse, response)
+				assert.Equal(t, status.Code(tc.expectedError), status.Code(err))
+
+				// Check tracked Prometheus metrics. Since the Push() response is sent as soon as the quorum
+				// is reached, when we reach this point the 3rd ingester may not have received series/metadata
+				// yet. To avoid flaky test we retry metrics assertion until we hit the desired state (no error)
+				// within a reasonable timeout.
+				if tc.expectedMetrics != "" {
+					test.Poll(t, time.Second, nil, func() interface{} {
+						return testutil.GatherAndCompare(regs[0], strings.NewReader(tc.expectedMetrics), tc.metricNames...)
+					})
+				}
+			})
+		}
+	}
+}
+
+// TestDistributorPRW2_PushIngestionRateLimiter verifies the ingestion rate
+// limiter for PushV2 under both the local and the global rate-limit
+// strategies, pushing a scripted sequence of requests at the first
+// distributor and asserting the exact 429 error returned once the limit or
+// burst is exhausted. Each scenario runs for float samples and histograms.
+func TestDistributorPRW2_PushIngestionRateLimiter(t *testing.T) {
+	t.Parallel()
+	type testPush struct {
+		samples       int
+		metadata      int
+		expectedError error // nil means the push must succeed
+	}
+
+	ctx := user.InjectOrgID(context.Background(), "user")
+	tests := map[string]struct {
+		distributors          int
+		ingestionRateStrategy string
+		ingestionRate         float64
+		ingestionBurstSize    int
+		pushes                []testPush // executed in order against distributors[0]
+	}{
+		"local strategy: limit should be set to each distributor": {
+			distributors:          2,
+			ingestionRateStrategy: validation.LocalIngestionRateStrategy,
+			ingestionRate:         10,
+			ingestionBurstSize:    10,
+			pushes: []testPush{
+				{samples: 4, expectedError: nil},
+				{metadata: 1, expectedError: nil},
+				{samples: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10) exceeded while adding 6 samples and 0 metadata")},
+				{samples: 4, metadata: 1, expectedError: nil},
+				{samples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10) exceeded while adding 1 samples and 0 metadata")},
+				{metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10) exceeded while adding 0 samples and 1 metadata")},
+			},
+		},
+		"global strategy: limit should be evenly shared across distributors": {
+			distributors:          2,
+			ingestionRateStrategy: validation.GlobalIngestionRateStrategy,
+			ingestionRate:         10,
+			ingestionBurstSize:    5,
+			pushes: []testPush{
+				{samples: 2, expectedError: nil},
+				{samples: 1, expectedError: nil},
+				{samples: 2, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 2 samples and 1 metadata")},
+				{samples: 2, expectedError: nil},
+				{samples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 1 samples and 0 metadata")},
+				{metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 0 samples and 1 metadata")},
+			},
+		},
+		"global strategy: burst should set to each distributor": {
+			distributors:          2,
+			ingestionRateStrategy: validation.GlobalIngestionRateStrategy,
+			ingestionRate:         10,
+			ingestionBurstSize:    20,
+			pushes: []testPush{
+				{samples: 10, expectedError: nil},
+				{samples: 5, expectedError: nil},
+				{samples: 5, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 5 samples and 1 metadata")},
+				{samples: 5, expectedError: nil},
+				{samples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 1 samples and 0 metadata")},
+				{metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (5) exceeded while adding 0 samples and 1 metadata")},
+			},
+		},
+	}
+
+	for testName, testData := range tests {
+		// Capture range variables for the parallel subtests (pre-Go 1.22 semantics).
+		testData := testData
+
+		for _, enableHistogram := range []bool{false, true} {
+			enableHistogram := enableHistogram
+			t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) {
+				t.Parallel()
+				limits := &validation.Limits{}
+				flagext.DefaultValues(limits)
+				limits.IngestionRateStrategy = testData.ingestionRateStrategy
+				limits.IngestionRate = testData.ingestionRate
+				limits.IngestionBurstSize = testData.ingestionBurstSize
+
+				// Start all expected distributors
+				distributors, _, _, _ := prepare(t, prepConfig{
+					numIngesters:     3,
+					happyIngesters:   3,
+					numDistributors:  testData.distributors,
+					shardByAllLabels: true,
+					limits:           limits,
+				})
+
+				// Push samples in multiple requests to the first distributor
+				for _, push := range testData.pushes {
+					var request *cortexpbv2.WriteRequest
+					if !enableHistogram {
+						request = makeWriteRequestV2WithSamples(0, push.samples, push.metadata)
+					} else {
+						request = makeWriteRequestV2WithHistogram(0, push.samples, push.metadata)
+					}
+					response, err := distributors[0].PushV2(ctx, request)
+
+					if push.expectedError == nil {
+						assert.Equal(t, emptyResponseV2, response)
+						assert.Nil(t, err)
+					} else {
+						assert.Nil(t, response)
+						assert.Equal(t, push.expectedError, err)
+					}
+				}
+			})
+		}
+	}
+}
+
+// TestPushPRW2_QuorumError verifies which gRPC status code PushV2 surfaces
+// when the three replica ingesters fail with different mixes of 4xx/5xx
+// errors, and that losing a ring member (ingester-2 going LEFT) makes a
+// single remaining failure fatal. Each scenario loops many times to shake
+// out races in the quorum-tracking code.
+func TestPushPRW2_QuorumError(t *testing.T) {
+	t.Parallel()
+
+	var limits validation.Limits
+	flagext.DefaultValues(&limits)
+
+	limits.IngestionRate = math.MaxFloat64
+
+	dists, ingesters, _, r := prepare(t, prepConfig{
+		numDistributors:     1,
+		numIngesters:        3,
+		happyIngesters:      0,
+		shuffleShardSize:    3,
+		shardByAllLabels:    true,
+		shuffleShardEnabled: true,
+		limits:              &limits,
+	})
+
+	ctx := user.InjectOrgID(context.Background(), "user")
+
+	d := dists[0]
+
+	// Run many write requests to make sure we don't have any race condition on the batchTracker#record code.
+	numberOfWrites := 10000
+
+	// Using 429 just to make sure we are not hitting the rate limits.
+	// Simulating 2 4xx and 1 5xx -> Should return 4xx
+	ingesters[0].failResp.Store(httpgrpc.Errorf(429, "Throttling"))
+	ingesters[1].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+	ingesters[2].failResp.Store(httpgrpc.Errorf(429, "Throttling"))
+
+	for i := 0; i < numberOfWrites; i++ {
+		request := makeWriteRequestV2WithSamples(0, 30, 20)
+		_, err := d.PushV2(ctx, request)
+		// NOTE: the local "status" shadows the grpc status package inside this loop body.
+		status, ok := status.FromError(err)
+		require.True(t, ok)
+		require.Equal(t, codes.Code(429), status.Code())
+	}
+
+	// Simulating 2 5xx and 1 4xx -> Should return 5xx
+	ingesters[0].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+	ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling"))
+	ingesters[2].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+
+	for i := 0; i < numberOfWrites; i++ {
+		request := makeWriteRequestV2WithSamples(0, 300, 200)
+		_, err := d.PushV2(ctx, request)
+		status, ok := status.FromError(err)
+		require.True(t, ok)
+		require.Equal(t, codes.Code(500), status.Code())
+	}
+
+	// Simulating 2 different errors and 1 success -> This case we may return any of the errors
+	// NOTE(review): the comment above says either error may surface, yet the assertion
+	// below pins 429 — presumably the 4xx takes precedence here; confirm against the
+	// distributor's error-merging logic.
+	ingesters[0].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+	ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling"))
+	ingesters[2].happy.Store(true)
+
+	for i := 0; i < numberOfWrites; i++ {
+		request := makeWriteRequestV2WithSamples(0, 30, 20)
+		_, err := d.PushV2(ctx, request)
+		status, ok := status.FromError(err)
+		require.True(t, ok)
+		require.Equal(t, codes.Code(429), status.Code())
+	}
+
+	// Simulating 1 error -> Should return 2xx
+	ingesters[0].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+	ingesters[1].happy.Store(true)
+	ingesters[2].happy.Store(true)
+
+	for i := 0; i < 1; i++ {
+		request := makeWriteRequestV2WithSamples(0, 30, 20)
+		_, err := d.PushV2(ctx, request)
+		require.NoError(t, err)
+	}
+
+	// Simulating an unhealthy ingester (ingester 2)
+	ingesters[0].failResp.Store(httpgrpc.Errorf(500, "InternalServerError"))
+	ingesters[1].happy.Store(true)
+	ingesters[2].happy.Store(true)
+
+	// Mark ingester-2 as LEFT in the ring so only two healthy instances remain.
+	err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in interface{}) (interface{}, bool, error) {
+		r := in.(*ring.Desc)
+		ingester2 := r.Ingesters["ingester-2"]
+		ingester2.State = ring.LEFT
+		ingester2.Timestamp = time.Now().Unix()
+		r.Ingesters["ingester-2"] = ingester2
+		return in, true, nil
+	})
+
+	require.NoError(t, err)
+
+	// Give time to the ring get updated with the KV value
+	test.Poll(t, 15*time.Second, true, func() interface{} {
+		replicationSet, _ := r.GetAllHealthy(ring.Read)
+		return len(replicationSet.Instances) == 2
+	})
+
+	// With only two instances left, the single 500 from ingester-0 breaks the quorum.
+	for i := 0; i < numberOfWrites; i++ {
+		request := makeWriteRequestV2WithSamples(0, 30, 20)
+		_, err := d.PushV2(ctx, request)
+		require.Error(t, err)
+		status, ok := status.FromError(err)
+		require.True(t, ok)
+		require.Equal(t, codes.Code(500), status.Code())
+	}
+}
+
+// TestDistributorPRW2_PushInstanceLimits verifies that remote-write v2 pushes
+// honour the distributor's per-instance limits (max inflight push requests and
+// max ingestion rate) and that the corresponding instance-limit metrics are
+// exported. Each case is run twice: once with float samples and once with
+// native histograms.
+func TestDistributorPRW2_PushInstanceLimits(t *testing.T) {
+	t.Parallel()
+
+	// A single simulated push: how many samples/metadata entries to send and
+	// which error (if any) the push is expected to return.
+	type testPush struct {
+		samples       int
+		metadata      int
+		expectedError error
+	}
+
+	ctx := user.InjectOrgID(context.Background(), "user")
+	tests := map[string]struct {
+		preInflight    int        // inflight push requests seeded before the first push
+		preRateSamples int        // initial rate before first push
+		pushes         []testPush // rate is recomputed after each push
+
+		// limits
+		inflightLimit      int
+		ingestionRateLimit float64
+
+		metricNames     []string
+		expectedMetrics string
+	}{
+		"no limits limit": {
+			preInflight:    100,
+			preRateSamples: 1000,
+
+			pushes: []testPush{
+				{samples: 100, expectedError: nil},
+			},
+
+			metricNames: []string{instanceLimitsMetric},
+			expectedMetrics: `
+				# HELP cortex_distributor_instance_limits Instance limits used by this distributor.
+				# TYPE cortex_distributor_instance_limits gauge
+				cortex_distributor_instance_limits{limit="max_inflight_push_requests"} 0
+				cortex_distributor_instance_limits{limit="max_ingestion_rate"} 0
+			`,
+		},
+		"below inflight limit": {
+			preInflight:   100,
+			inflightLimit: 101,
+			pushes: []testPush{
+				{samples: 100, expectedError: nil},
+			},
+
+			metricNames: []string{instanceLimitsMetric, "cortex_distributor_inflight_push_requests"},
+			expectedMetrics: `
+				# HELP cortex_distributor_inflight_push_requests Current number of inflight push requests in distributor.
+				# TYPE cortex_distributor_inflight_push_requests gauge
+				cortex_distributor_inflight_push_requests 100
+
+				# HELP cortex_distributor_instance_limits Instance limits used by this distributor.
+				# TYPE cortex_distributor_instance_limits gauge
+				cortex_distributor_instance_limits{limit="max_inflight_push_requests"} 101
+				cortex_distributor_instance_limits{limit="max_ingestion_rate"} 0
+			`,
+		},
+		"hits inflight limit": {
+			preInflight:   101,
+			inflightLimit: 101,
+			pushes: []testPush{
+				{samples: 100, expectedError: errTooManyInflightPushRequests},
+			},
+		},
+		"below ingestion rate limit": {
+			preRateSamples:     500,
+			ingestionRateLimit: 1000,
+
+			pushes: []testPush{
+				{samples: 1000, expectedError: nil},
+			},
+
+			metricNames: []string{instanceLimitsMetric, "cortex_distributor_ingestion_rate_samples_per_second"},
+			expectedMetrics: `
+				# HELP cortex_distributor_ingestion_rate_samples_per_second Current ingestion rate in samples/sec that distributor is using to limit access.
+				# TYPE cortex_distributor_ingestion_rate_samples_per_second gauge
+				cortex_distributor_ingestion_rate_samples_per_second 600
+
+				# HELP cortex_distributor_instance_limits Instance limits used by this distributor.
+				# TYPE cortex_distributor_instance_limits gauge
+				cortex_distributor_instance_limits{limit="max_inflight_push_requests"} 0
+				cortex_distributor_instance_limits{limit="max_ingestion_rate"} 1000
+			`,
+		},
+		"hits rate limit on first request, but second request can proceed": {
+			preRateSamples:     1200,
+			ingestionRateLimit: 1000,
+
+			pushes: []testPush{
+				{samples: 100, expectedError: errMaxSamplesPushRateLimitReached},
+				{samples: 100, expectedError: nil},
+			},
+		},
+		"below rate limit on first request, but hits the rate limit afterwards": {
+			preRateSamples:     500,
+			ingestionRateLimit: 1000,
+
+			pushes: []testPush{
+				{samples: 5000, expectedError: nil},                               // after push, rate = 500 + 0.2*(5000-500) = 1400
+				{samples: 5000, expectedError: errMaxSamplesPushRateLimitReached}, // after push, rate = 1400 + 0.2*(0 - 1400) = 1120
+				{samples: 5000, expectedError: errMaxSamplesPushRateLimitReached}, // after push, rate = 1120 + 0.2*(0 - 1120) = 896
+				{samples: 5000, expectedError: nil},                               // 896 is below 1000, so this push succeeds, new rate = 896 + 0.2*(5000-896) = 1716.8
+			},
+		},
+	}
+
+	for testName, testData := range tests {
+		testData := testData
+
+		// Run every case both with native histograms and with float samples.
+		for _, enableHistogram := range []bool{true, false} {
+			enableHistogram := enableHistogram
+			t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) {
+				t.Parallel()
+				limits := &validation.Limits{}
+				flagext.DefaultValues(limits)
+
+				// Start all expected distributors
+				distributors, _, regs, _ := prepare(t, prepConfig{
+					numIngesters:        3,
+					happyIngesters:      3,
+					numDistributors:     1,
+					shardByAllLabels:    true,
+					limits:              limits,
+					maxInflightRequests: testData.inflightLimit,
+					maxIngestionRate:    testData.ingestionRateLimit,
+				})
+
+				d := distributors[0]
+				// Seed the inflight counter and ingestion rate so the first push
+				// observes the configured pre-existing load.
+				d.inflightPushRequests.Add(int64(testData.preInflight))
+				d.ingestionRate.Add(int64(testData.preRateSamples))
+
+				d.ingestionRate.Tick()
+
+				for _, push := range testData.pushes {
+					var request *cortexpbv2.WriteRequest
+					if enableHistogram {
+						request = makeWriteRequestV2WithHistogram(0, push.samples, push.metadata)
+					} else {
+						request = makeWriteRequestV2WithSamples(0, push.samples, push.metadata)
+					}
+					_, err := d.PushV2(ctx, request)
+
+					if push.expectedError == nil {
+						assert.Nil(t, err)
+					} else {
+						assert.Equal(t, push.expectedError, err)
+					}
+
+					// Advance the rate tracker so the next push sees the updated rate.
+					d.ingestionRate.Tick()
+
+					if testData.expectedMetrics != "" {
+						assert.NoError(t, testutil.GatherAndCompare(regs[0], strings.NewReader(testData.expectedMetrics), testData.metricNames...))
+					}
+				}
+			})
+		}
+	}
+}
+
+// TestDistributorPRW2_PushQuery pushes series via remote-write v2 and queries
+// them back, programmatically generating cases across sharding mode, ingester
+// count, number of healthy ingesters, and shuffle sharding. It checks both the
+// query result and how many ingesters were contacted.
+func TestDistributorPRW2_PushQuery(t *testing.T) {
+	t.Parallel()
+	const shuffleShardSize = 5
+
+	ctx := user.InjectOrgID(context.Background(), "user")
+	nameMatcher := mustEqualMatcher(model.MetricNameLabel, "foo")
+	barMatcher := mustEqualMatcher("bar", "baz")
+
+	type testcase struct {
+		name                string
+		numIngesters        int
+		happyIngesters      int
+		samples             int
+		metadata            int
+		matchers            []*labels.Matcher
+		expectedIngesters   int
+		expectedResponse    model.Matrix
+		expectedError       error
+		shardByAllLabels    bool
+		shuffleShardEnabled bool
+	}
+
+	// We'll programmatically build the test cases now, as we want complete
+	// coverage along quite a few different axis.
+	testcases := []testcase{}
+
+	// Run every test in both sharding modes.
+	for _, shardByAllLabels := range []bool{true, false} {
+
+		// Test with between 2 and 10 ingesters.
+		for numIngesters := 2; numIngesters < 10; numIngesters++ {
+
+			// Test with between 0 and numIngesters "happy" ingesters.
+			for happyIngesters := 0; happyIngesters <= numIngesters; happyIngesters++ {
+
+				// Test either with shuffle-sharding enabled or disabled.
+				for _, shuffleShardEnabled := range []bool{false, true} {
+					scenario := fmt.Sprintf("shardByAllLabels=%v, numIngester=%d, happyIngester=%d, shuffleSharding=%v)", shardByAllLabels, numIngesters, happyIngesters, shuffleShardEnabled)
+
+					// The number of ingesters we expect to query depends whether shuffle sharding and/or
+					// shard by all labels are enabled.
+					var expectedIngesters int
+					if shuffleShardEnabled {
+						expectedIngesters = min(shuffleShardSize, numIngesters)
+					} else if shardByAllLabels {
+						expectedIngesters = numIngesters
+					} else {
+						expectedIngesters = 3 // Replication factor
+					}
+
+					// When we're not sharding by metric name, queriers with more than one
+					// failed ingester should fail.
+					if shardByAllLabels && numIngesters-happyIngesters > 1 {
+						testcases = append(testcases, testcase{
+							name:                fmt.Sprintf("ExpectFail(%s)", scenario),
+							numIngesters:        numIngesters,
+							happyIngesters:      happyIngesters,
+							matchers:            []*labels.Matcher{nameMatcher, barMatcher},
+							expectedError:       errFail,
+							shardByAllLabels:    shardByAllLabels,
+							shuffleShardEnabled: shuffleShardEnabled,
+						})
+						continue
+					}
+
+					// When we have less ingesters than replication factor, any failed ingester
+					// will cause a failure.
+					if numIngesters < 3 && happyIngesters < 2 {
+						testcases = append(testcases, testcase{
+							name:                fmt.Sprintf("ExpectFail(%s)", scenario),
+							numIngesters:        numIngesters,
+							happyIngesters:      happyIngesters,
+							matchers:            []*labels.Matcher{nameMatcher, barMatcher},
+							expectedError:       errFail,
+							shardByAllLabels:    shardByAllLabels,
+							shuffleShardEnabled: shuffleShardEnabled,
+						})
+						continue
+					}
+
+					// If we're sharding by metric name and we have failed ingesters, we can't
+					// tell ahead of time if the query will succeed, as we don't know which
+					// ingesters will hold the results for the query.
+					if !shardByAllLabels && numIngesters-happyIngesters > 1 {
+						continue
+					}
+
+					// Reading all the samples back should succeed.
+					testcases = append(testcases, testcase{
+						name:                fmt.Sprintf("ReadAll(%s)", scenario),
+						numIngesters:        numIngesters,
+						happyIngesters:      happyIngesters,
+						samples:             10,
+						matchers:            []*labels.Matcher{nameMatcher, barMatcher},
+						expectedResponse:    expectedResponse(0, 10),
+						expectedIngesters:   expectedIngesters,
+						shardByAllLabels:    shardByAllLabels,
+						shuffleShardEnabled: shuffleShardEnabled,
+					})
+
+					// As should reading none of the samples back.
+					testcases = append(testcases, testcase{
+						name:                fmt.Sprintf("ReadNone(%s)", scenario),
+						numIngesters:        numIngesters,
+						happyIngesters:      happyIngesters,
+						samples:             10,
+						matchers:            []*labels.Matcher{nameMatcher, mustEqualMatcher("not", "found")},
+						expectedResponse:    expectedResponse(0, 0),
+						expectedIngesters:   expectedIngesters,
+						shardByAllLabels:    shardByAllLabels,
+						shuffleShardEnabled: shuffleShardEnabled,
+					})
+
+					// And reading each sample individually.
+					for i := 0; i < 10; i++ {
+						testcases = append(testcases, testcase{
+							name:                fmt.Sprintf("ReadOne(%s, sample=%d)", scenario, i),
+							numIngesters:        numIngesters,
+							happyIngesters:      happyIngesters,
+							samples:             10,
+							matchers:            []*labels.Matcher{nameMatcher, mustEqualMatcher("sample", strconv.Itoa(i))},
+							expectedResponse:    expectedResponse(i, i+1),
+							expectedIngesters:   expectedIngesters,
+							shardByAllLabels:    shardByAllLabels,
+							shuffleShardEnabled: shuffleShardEnabled,
+						})
+					}
+				}
+			}
+		}
+	}
+
+	for _, tc := range testcases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			ds, ingesters, _, _ := prepare(t, prepConfig{
+				numIngesters:        tc.numIngesters,
+				happyIngesters:      tc.happyIngesters,
+				numDistributors:     1,
+				shardByAllLabels:    tc.shardByAllLabels,
+				shuffleShardEnabled: tc.shuffleShardEnabled,
+				shuffleShardSize:    shuffleShardSize,
+			})
+
+			request := makeWriteRequestV2WithSamples(0, tc.samples, tc.metadata)
+			writeResponse, err := ds[0].PushV2(ctx, request)
+			assert.Equal(t, &cortexpbv2.WriteResponse{}, writeResponse)
+			assert.Nil(t, err)
+
+			var response model.Matrix
+			series, err := ds[0].QueryStream(ctx, 0, 10, tc.matchers...)
+			assert.Equal(t, tc.expectedError, err)
+
+			// Convert the streamed chunks (if any) into a matrix for comparison.
+			if series == nil {
+				response, err = chunkcompat.SeriesChunksToMatrix(0, 10, nil)
+			} else {
+				response, err = chunkcompat.SeriesChunksToMatrix(0, 10, series.Chunkseries)
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, tc.expectedResponse.String(), response.String())
+
+			// Check how many ingesters have been queried.
+			// Due to the quorum the distributor could cancel the last request towards ingesters
+			// if all other ones are successful, so we're good either has been queried X or X-1
+			// ingesters.
+			if tc.expectedError == nil {
+				assert.Contains(t, []int{tc.expectedIngesters, tc.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "QueryStream"))
+			}
+		})
+	}
+}
+
+// TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReached
+// pushes series (float samples and native histograms) up to the max-chunks query
+// limit, verifies a query at the limit succeeds, then pushes more series and
+// verifies the query fails once the limit is exceeded.
+func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReached(t *testing.T) {
+	t.Parallel()
+	const maxChunksLimit = 30 // Chunks are duplicated due to replication factor.
+
+	for _, histogram := range []bool{true, false} {
+		ctx := user.InjectOrgID(context.Background(), "user")
+		limits := &validation.Limits{}
+		flagext.DefaultValues(limits)
+		limits.MaxChunksPerQuery = maxChunksLimit
+
+		// Prepare distributors.
+		ds, _, _, _ := prepare(t, prepConfig{
+			numIngesters:     3,
+			happyIngesters:   3,
+			numDistributors:  1,
+			shardByAllLabels: true,
+			limits:           limits,
+		})
+
+		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, maxChunksLimit, 0))
+
+		// Push a number of series below the max chunks limit. Each series has 1 sample,
+		// so expect 1 chunk per series when querying back.
+		initialSeries := maxChunksLimit / 3
+		var writeReqV2 *cortexpbv2.WriteRequest
+		if histogram {
+			writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0)
+		} else {
+			writeReqV2 = makeWriteRequestV2WithSamples(0, initialSeries, 0)
+		}
+
+		writeRes, err := ds[0].PushV2(ctx, writeReqV2)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		allSeriesMatchers := []*labels.Matcher{
+			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
+		}
+
+		// Since the number of series (and thus chunks) is equal to the limit (but doesn't
+		// exceed it), we expect a query running on all series to succeed.
+		queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+		assert.Len(t, queryRes.Chunkseries, initialSeries)
+
+		// Push more series to exceed the limit once we'll query back all series.
+
+		for i := 0; i < maxChunksLimit; i++ {
+			// Each extra series gets a unique metric name, symbolized in the
+			// request's v2 symbols table (index 0 must be the empty string).
+			writeReq := &cortexpbv2.WriteRequest{}
+			writeReq.Symbols = []string{"", "__name__", fmt.Sprintf("another_series_%d", i)}
+			writeReq.Timeseries = append(writeReq.Timeseries,
+				makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram, false),
+			)
+			writeRes, err := ds[0].PushV2(ctx, writeReq)
+			assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+			assert.Nil(t, err)
+		}
+
+		// Since the number of series (and thus chunks) is exceeding the limit, we expect
+		// a query running on all series to fail.
+		_, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "the query hit the max number of chunks limit")
+	}
+}
+
+// TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached
+// pushes exactly max-series-per-query series, verifies a query at the limit
+// succeeds, then pushes one more series and verifies the query fails.
+func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached(t *testing.T) {
+	t.Parallel()
+	const maxSeriesLimit = 10
+
+	for _, histogram := range []bool{true, false} {
+		ctx := user.InjectOrgID(context.Background(), "user")
+		limits := &validation.Limits{}
+		flagext.DefaultValues(limits)
+		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0, 0, 0))
+
+		// Prepare distributors.
+		ds, _, _, _ := prepare(t, prepConfig{
+			numIngesters:     3,
+			happyIngesters:   3,
+			numDistributors:  1,
+			shardByAllLabels: true,
+			limits:           limits,
+		})
+
+		// Push a number of series below the max series limit.
+		initialSeries := maxSeriesLimit
+		var writeReqV2 *cortexpbv2.WriteRequest
+		if histogram {
+			writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0)
+		} else {
+			writeReqV2 = makeWriteRequestV2WithSamples(0, initialSeries, 0)
+		}
+
+		writeRes, err := ds[0].PushV2(ctx, writeReqV2)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		allSeriesMatchers := []*labels.Matcher{
+			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
+		}
+
+		// Since the number of series is equal to the limit (but doesn't
+		// exceed it), we expect a query running on all series to succeed.
+		queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+		assert.Len(t, queryRes.Chunkseries, initialSeries)
+
+		// Push more series to exceed the limit once we'll query back all series.
+		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq.Symbols = []string{"", "__name__", "another_series"}
+		writeReq.Timeseries = append(writeReq.Timeseries,
+			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
+		)
+
+		writeRes, err = ds[0].PushV2(ctx, writeReq)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		// Since the number of series is exceeding the limit, we expect
+		// a query running on all series to fail.
+		_, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "max number of series limit")
+	}
+}
+
+// TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIsReached
+// measures the chunk size of one pushed series, derives a chunk-bytes limit from
+// it, fills up to that limit, verifies a query succeeds, then pushes one more
+// series and verifies the query fails with the chunk-bytes limit error.
+func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIsReached(t *testing.T) {
+	t.Parallel()
+	const seriesToAdd = 10
+
+	for _, histogram := range []bool{true, false} {
+		ctx := user.InjectOrgID(context.Background(), "user")
+		limits := &validation.Limits{}
+		flagext.DefaultValues(limits)
+
+		// Prepare distributors.
+		// Use replication factor of 2 to always read all the chunks from both ingesters,
+		// this guarantees us to always read the same chunks and have a stable test.
+		ds, _, _, _ := prepare(t, prepConfig{
+			numIngesters:      2,
+			happyIngesters:    2,
+			numDistributors:   1,
+			shardByAllLabels:  true,
+			limits:            limits,
+			replicationFactor: 2,
+		})
+
+		allSeriesMatchers := []*labels.Matcher{
+			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
+		}
+		// Push a single series to allow us to calculate the chunk size to calculate the limit for the test.
+		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq.Symbols = []string{"", "__name__", "another_series"}
+		writeReq.Timeseries = append(writeReq.Timeseries,
+			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
+		)
+		writeRes, err := ds[0].PushV2(ctx, writeReq)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+		chunkSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+
+		// Use the resulting chunk size to calculate the limit as seriesToAdd * the
+		// response chunk size (the series pushed above counts towards seriesToAdd).
+		var responseChunkSize = chunkSizeResponse.ChunksSize()
+		var maxBytesLimit = (seriesToAdd) * responseChunkSize
+
+		// Update the limiter with the calculated limits.
+		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0, 0))
+
+		// Push a number of series below the max chunk bytes limit. Subtract one for the series added above.
+		var writeReqV2 *cortexpbv2.WriteRequest
+		if histogram {
+			writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0)
+		} else {
+			writeReqV2 = makeWriteRequestV2WithSamples(0, seriesToAdd-1, 0)
+		}
+
+		writeRes, err = ds[0].PushV2(ctx, writeReqV2)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		// Since the number of chunk bytes is equal to the limit (but doesn't
+		// exceed it), we expect a query running on all series to succeed.
+		queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+		assert.Len(t, queryRes.Chunkseries, seriesToAdd)
+
+		// Push another series to exceed the chunk bytes limit once we'll query back all series.
+		writeReq = &cortexpbv2.WriteRequest{}
+		writeReq.Symbols = []string{"", "__name__", "another_series_1"}
+		writeReq.Timeseries = append(writeReq.Timeseries,
+			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false),
+		)
+
+		writeRes, err = ds[0].PushV2(ctx, writeReq)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		// Since the aggregated chunk size is exceeding the limit, we expect
+		// a query running on all series to fail.
+		_, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.Error(t, err)
+		assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, maxBytesLimit)))
+	}
+}
+
+// TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached
+// measures the response data size of one pushed series, derives a data-bytes
+// limit from it, fills up to that limit, verifies a query succeeds, then pushes
+// one more series and verifies the query fails with the data-bytes limit error.
+func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached(t *testing.T) {
+	t.Parallel()
+	const seriesToAdd = 10
+
+	for _, histogram := range []bool{true, false} {
+		ctx := user.InjectOrgID(context.Background(), "user")
+		limits := &validation.Limits{}
+		flagext.DefaultValues(limits)
+
+		// Prepare distributors.
+		// Use replication factor of 2 to always read all the chunks from both ingesters,
+		// this guarantees us to always read the same chunks and have a stable test.
+		ds, _, _, _ := prepare(t, prepConfig{
+			numIngesters:      2,
+			happyIngesters:    2,
+			numDistributors:   1,
+			shardByAllLabels:  true,
+			limits:            limits,
+			replicationFactor: 2,
+		})
+
+		allSeriesMatchers := []*labels.Matcher{
+			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
+		}
+		// Push a single series to allow us to calculate the label size to calculate the limit for the test.
+		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq.Symbols = []string{"", "__name__", "another_series"}
+		writeReq.Timeseries = append(writeReq.Timeseries,
+			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
+		)
+
+		writeRes, err := ds[0].PushV2(ctx, writeReq)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+		dataSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+
+		// Use the resulting response data size to calculate the limit as
+		// seriesToAdd * dataSize * RF (the series pushed above counts towards seriesToAdd).
+		var dataSize = dataSizeResponse.Size()
+		var maxBytesLimit = (seriesToAdd) * dataSize * 2 // Multiplying by RF because the limit is applied before de-duping.
+
+		// Update the limiter with the calculated limits.
+		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit))
+
+		// Push a number of series below the max data bytes limit. Subtract one for the series added above.
+		var writeReqV2 *cortexpbv2.WriteRequest
+		if histogram {
+			writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0)
+		} else {
+			writeReqV2 = makeWriteRequestV2WithSamples(0, seriesToAdd-1, 0)
+		}
+
+		writeRes, err = ds[0].PushV2(ctx, writeReqV2)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		// Since the number of data bytes is equal to the limit (but doesn't
+		// exceed it), we expect a query running on all series to succeed.
+		queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.NoError(t, err)
+		assert.Len(t, queryRes.Chunkseries, seriesToAdd)
+
+		// Push another series to exceed the data bytes limit once we'll query back all series.
+		writeReq = &cortexpbv2.WriteRequest{}
+		writeReq.Symbols = []string{"", "__name__", "another_series_1"}
+		writeReq.Timeseries = append(writeReq.Timeseries,
+			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false),
+		)
+
+		writeRes, err = ds[0].PushV2(ctx, writeReq)
+		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Nil(t, err)
+
+		// Since the aggregated data size is exceeding the limit, we expect
+		// a query running on all series to fail.
+		_, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
+		require.Error(t, err)
+		assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, maxBytesLimit)))
+	}
+}
+
+// TestDistributorPRW2_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime
+// pins the sharding token computed for known label sets, so that future changes
+// can't silently reshuffle series across ingesters. Dropped labels and the HA
+// replica label must not influence the token.
+func TestDistributorPRW2_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t *testing.T) {
+	t.Parallel()
+	ctx := user.InjectOrgID(context.Background(), "user")
+	tests := map[string]struct {
+		inputSeries    labels.Labels
+		expectedSeries labels.Labels
+		expectedToken  uint32
+	}{
+		"metric_1 with value_1": {
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+			},
+			expectedToken: 0xec0a2e9d,
+		},
+		"metric_1 with value_1 and dropped label due to config": {
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+				{Name: "dropped", Value: "unused"}, // will be dropped, doesn't need to be in correct order
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+			},
+			// Same token as the case without the dropped label.
+			expectedToken: 0xec0a2e9d,
+		},
+		"metric_1 with value_1 and dropped HA replica label": {
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+				{Name: "__replica__", Value: "replica_1"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "cluster", Value: "cluster_1"},
+				{Name: "key", Value: "value_1"},
+			},
+			// Same token as the case without the replica label.
+			expectedToken: 0xec0a2e9d,
+		},
+		"metric_2 with value_1": {
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_2"},
+				{Name: "key", Value: "value_1"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_2"},
+				{Name: "key", Value: "value_1"},
+			},
+			expectedToken: 0xa60906f2,
+		},
+		"metric_1 with value_2": {
+			inputSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "key", Value: "value_2"},
+			},
+			expectedSeries: labels.Labels{
+				{Name: "__name__", Value: "metric_1"},
+				{Name: "key", Value: "value_2"},
+			},
+			expectedToken: 0x18abc8a2,
+		},
+	}
+
+	var limits validation.Limits
+	flagext.DefaultValues(&limits)
+	limits.DropLabels = []string{"dropped"}
+	limits.AcceptHASamples = true
+
+	for testName, testData := range tests {
+		testData := testData
+		t.Run(testName, func(t *testing.T) {
+			t.Parallel()
+			ds, ingesters, _, _ := prepare(t, prepConfig{
+				numIngesters:     2,
+				happyIngesters:   2,
+				numDistributors:  1,
+				shardByAllLabels: true,
+				limits:           &limits,
+			})
+
+			// Push the series to the distributor
+			req := mockWriteRequestV2([]labels.Labels{testData.inputSeries}, 1, 1, false)
+			_, err := ds[0].PushV2(ctx, req)
+			require.NoError(t, err)
+
+			// Since each test pushes only 1 series, we do expect the ingester
+			// to have received exactly 1 series
+			for i := range ingesters {
+				timeseries := ingesters[i].series()
+				assert.Equal(t, 1, len(timeseries))
+
+				// The series must be stored under the pinned sharding token.
+				series, ok := timeseries[testData.expectedToken]
+				require.True(t, ok)
+				assert.Equal(t, testData.expectedSeries, cortexpb.FromLabelAdaptersToLabels(series.Labels))
+			}
+		})
+	}
+}
+
+// makeWriteRequestV2WithSamples builds a remote-write v2 request containing
+// `samples` float-sample timeseries labelled {__name__="foo", bar="baz",
+// sample="<i>"}, with timestamps starting at startTimestampMs and increasing by
+// one per series. The first `metadata` series carry metadata (the `i < metadata`
+// flag below); if metadata > samples, extra metadata-only entries are appended
+// so `metadata` metadata entries exist in total. All label strings are interned
+// in a single writev2 symbol table whose symbols become request.Symbols.
+func makeWriteRequestV2WithSamples(startTimestampMs int64, samples int, metadata int) *cortexpbv2.WriteRequest {
+	request := &cortexpbv2.WriteRequest{}
+	st := writev2.NewSymbolTable()
+	// Pre-populate the table with the shared base labels; the returned refs are
+	// discarded here, only the interned symbols matter.
+	st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil)
+
+	for i := 0; i < samples; i++ {
+		st.SymbolizeLabels(labels.Labels{{Name: "sample", Value: fmt.Sprintf("%d", i)}, {Name: "bar", Value: "baz"}}, nil)
+		request.Timeseries = append(request.Timeseries, makeTimeseriesV2FromST(
+			[]cortexpb.LabelAdapter{
+				{Name: model.MetricNameLabel, Value: "foo"},
+				{Name: "bar", Value: "baz"},
+				{Name: "sample", Value: fmt.Sprintf("%d", i)},
+			}, &st, startTimestampMs+int64(i), i, false, i < metadata))
+	}
+
+	// Add metadata-only timeseries for any metadata entries beyond the sample count.
+	for i := 0; i < metadata-samples; i++ {
+		request.Timeseries = append(request.Timeseries, makeMetadataV2FromST(i, &st))
+	}
+
+	request.Symbols = st.Symbols()
+
+	return request
+}
+
+// TestDistributorPRW2_Push_LabelNameValidation verifies that an invalid label
+// name ("999.illegal") is rejected by default on remote-write v2 pushes, and
+// that validation can be skipped either via distributor config or via the
+// per-request SkipLabelNameValidation flag.
+func TestDistributorPRW2_Push_LabelNameValidation(t *testing.T) {
+	t.Parallel()
+	inputLabels := labels.Labels{
+		{Name: model.MetricNameLabel, Value: "foo"},
+		{Name: "999.illegal", Value: "baz"},
+	}
+	ctx := user.InjectOrgID(context.Background(), "user")
+
+	tests := map[string]struct {
+		inputLabels                labels.Labels
+		skipLabelNameValidationCfg bool // skip via distributor configuration
+		skipLabelNameValidationReq bool // skip via the WriteRequest flag
+		errExpected                bool
+		errMessage                 string
+	}{
+		"label name validation is on by default": {
+			inputLabels: inputLabels,
+			errExpected: true,
+			errMessage:  `sample invalid label: "999.illegal" metric "foo{999.illegal=\"baz\"}"`,
+		},
+		"label name validation can be skipped via config": {
+			inputLabels:                inputLabels,
+			skipLabelNameValidationCfg: true,
+			errExpected:                false,
+		},
+		"label name validation can be skipped via WriteRequest parameter": {
+			inputLabels:                inputLabels,
+			skipLabelNameValidationReq: true,
+			errExpected:                false,
+		},
+	}
+
+	for testName, tc := range tests {
+		tc := tc
+		// Run each case with both native-histogram and float-sample payloads.
+		for _, histogram := range []bool{true, false} {
+			histogram := histogram
+			t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) {
+				t.Parallel()
+				ds, _, _, _ := prepare(t, prepConfig{
+					numIngesters:            2,
+					happyIngesters:          2,
+					numDistributors:         1,
+					shuffleShardSize:        1,
+					skipLabelNameValidation: tc.skipLabelNameValidationCfg,
+				})
+				req := mockWriteRequestV2([]labels.Labels{tc.inputLabels}, 42, 100000, histogram)
+				req.SkipLabelNameValidation = tc.skipLabelNameValidationReq
+				_, err := ds[0].PushV2(ctx, req)
+				if tc.errExpected {
+					fromError, _ := status.FromError(err)
+					assert.Equal(t, tc.errMessage, fromError.Message())
+				} else {
+					assert.Nil(t, err)
+				}
+			})
+		}
+	}
+}
+
+// TestDistributorPRW2_Push_ExemplarValidation verifies exemplar validation on the
+// remote-write v2 push path: exemplars must carry labels and a timestamp, the
+// combined exemplar labelset must stay under the 128-character limit, and the
+// owning series labels must themselves be valid.
+func TestDistributorPRW2_Push_ExemplarValidation(t *testing.T) {
+	t.Parallel()
+	ctx := user.InjectOrgID(context.Background(), "user")
+	manyLabels := []string{model.MetricNameLabel, "test"}
+	// Build a series with the metric name plus 30 extra labels to exceed the
+	// per-series label count limit.
+	for i := 1; i < 31; i++ {
+		manyLabels = append(manyLabels, fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+	}
+
+	tests := map[string]struct {
+		req    *cortexpbv2.WriteRequest
+		errMsg string
+	}{
+		"valid exemplar": {
+			req: makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test"}, 1000, []string{"foo", "bar"}),
+		},
+		"rejects exemplar with no labels": {
+			req:    makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test"}, 1000, []string{}),
+			errMsg: `exemplar missing labels, timestamp: 1000 series: {__name__="test"} labels: {}`,
+		},
+		"rejects exemplar with no timestamp": {
+			req:    makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test"}, 0, []string{"foo", "bar"}),
+			errMsg: `exemplar missing timestamp, timestamp: 0 series: {__name__="test"} labels: {foo="bar"}`,
+		},
+		"rejects exemplar with too long labelset": {
+			req:    makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test"}, 1000, []string{"foo", strings.Repeat("0", 126)}),
+			errMsg: fmt.Sprintf(`exemplar combined labelset exceeds 128 characters, timestamp: 1000 series: {__name__="test"} labels: {foo="%s"}`, strings.Repeat("0", 126)),
+		},
+		"rejects exemplar with too many series labels": {
+			req:    makeWriteRequestV2Exemplar(manyLabels, 0, nil),
+			errMsg: "series has too many labels",
+		},
+		"rejects exemplar with duplicate series labels": {
+			req:    makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test", "foo", "bar", "foo", "bar"}, 0, nil),
+			errMsg: "duplicate label name",
+		},
+		"rejects exemplar with empty series label name": {
+			req:    makeWriteRequestV2Exemplar([]string{model.MetricNameLabel, "test", "", "bar"}, 0, nil),
+			errMsg: "invalid label",
+		},
+	}
+
+	for testName, tc := range tests {
+		tc := tc
+		t.Run(testName, func(t *testing.T) {
+			t.Parallel()
+			ds, _, _, _ := prepare(t, prepConfig{
+				numIngesters:     2,
+				happyIngesters:   2,
+				numDistributors:  1,
+				shuffleShardSize: 1,
+			})
+			_, err := ds[0].PushV2(ctx, tc.req)
+			if tc.errMsg != "" {
+				// Validation failures surface as gRPC status errors.
+				fromError, _ := status.FromError(err)
+				assert.Contains(t, fromError.Message(), tc.errMsg)
+			} else {
+				assert.Nil(t, err)
+			}
+		})
+	}
+}
+
+// TestDistributorPRW2_MetricsForLabelMatchers_SingleSlowIngester pushes v2 series
+// (both float-sample and native-histogram variants) while one of three ingesters
+// answers queries slowly, and asserts MetricsForLabelMatchers still succeeds
+// thanks to the RF=3 quorum.
+func TestDistributorPRW2_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) {
+	t.Parallel()
+	for _, histogram := range []bool{true, false} {
+		// Create distributor
+		ds, ing, _, _ := prepare(t, prepConfig{
+			numIngesters:        3,
+			happyIngesters:      3,
+			numDistributors:     1,
+			shardByAllLabels:    true,
+			shuffleShardEnabled: true,
+			shuffleShardSize:    3,
+			replicationFactor:   3,
+		})
+
+		// Artificially delay query responses from one ingester only.
+		ing[2].queryDelay = 50 * time.Millisecond
+
+		ctx := user.InjectOrgID(context.Background(), "test")
+
+		now := model.Now()
+
+		for i := 0; i < 100; i++ {
+			req := mockWriteRequestV2([]labels.Labels{{{Name: labels.MetricName, Value: "test"}, {Name: "app", Value: "m"}, {Name: "uniq8", Value: strconv.Itoa(i)}}}, 1, now.Unix(), histogram)
+			_, err := ds[0].PushV2(ctx, req)
+			require.NoError(t, err)
+		}
+
+		for i := 0; i < 50; i++ {
+			_, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test"))
+			require.NoError(t, err)
+		}
+	}
+}
+
+// TestDistributorPRW2_MetricsForLabelMatchers exercises MetricsForLabelMatchers and
+// MetricsForLabelMatchersStream over series pushed via the v2 write path, covering
+// matcher filtering, FastFingerprint collisions, shuffle sharding, and query limits
+// (max series / max data bytes).
+func TestDistributorPRW2_MetricsForLabelMatchers(t *testing.T) {
+	t.Parallel()
+	const numIngesters = 5
+
+	fixtures := []struct {
+		lbls      labels.Labels
+		value     int64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+		// The two following series have the same FastFingerprint=e002a3a451262627
+		{labels.Labels{{Name: labels.MetricName, Value: "fast_fingerprint_collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
+		{labels.Labels{{Name: labels.MetricName, Value: "fast_fingerprint_collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
+	}
+
+	tests := map[string]struct {
+		shuffleShardEnabled bool
+		shuffleShardSize    int
+		matchers            []*labels.Matcher
+		expectedResult      []model.Metric
+		expectedIngesters   int
+		queryLimiter        *limiter.QueryLimiter
+		expectedErr         error
+	}{
+		"should return an empty response if no metric match": {
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "unknown"),
+			},
+			expectedResult:    []model.Metric{},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should filter metrics by single matcher": {
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[0].lbls),
+				util.LabelsToMetric(fixtures[1].lbls),
+			},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should filter metrics by multiple matchers": {
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, "status", "200"),
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[0].lbls),
+			},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should return all matching metrics even if their FastFingerprint collide": {
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "fast_fingerprint_collision"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[3].lbls),
+				util.LabelsToMetric(fixtures[4].lbls),
+			},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should query only ingesters belonging to tenant's subring if shuffle sharding is enabled": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    3,
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[0].lbls),
+				util.LabelsToMetric(fixtures[1].lbls),
+			},
+			expectedIngesters: 3,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should query all ingesters if shuffle sharding is enabled but shard size is 0": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    0,
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[0].lbls),
+				util.LabelsToMetric(fixtures[1].lbls),
+			},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 0),
+			expectedErr:       nil,
+		},
+		"should return err if series limit is exhausted": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    0,
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult:    nil,
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(1, 0, 0, 0),
+			expectedErr:       validation.LimitError(fmt.Sprintf(limiter.ErrMaxSeriesHit, 1)),
+		},
+		"should return err if data bytes limit is exhausted": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    0,
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_1"),
+			},
+			expectedResult:    nil,
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(0, 0, 0, 1),
+			expectedErr:       validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, 1)),
+		},
+		"should not exhaust series limit when only one series is fetched": {
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_2"),
+			},
+			expectedResult: []model.Metric{
+				util.LabelsToMetric(fixtures[2].lbls),
+			},
+			expectedIngesters: numIngesters,
+			queryLimiter:      limiter.NewQueryLimiter(1, 0, 0, 0),
+			expectedErr:       nil,
+		},
+	}
+
+	for testName, testData := range tests {
+		testData := testData
+		for _, histogram := range []bool{true, false} {
+			histogram := histogram
+			t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) {
+				t.Parallel()
+				now := model.Now()
+
+				// Create distributor
+				ds, ingesters, _, _ := prepare(t, prepConfig{
+					numIngesters:        numIngesters,
+					happyIngesters:      numIngesters,
+					numDistributors:     1,
+					shardByAllLabels:    true,
+					shuffleShardEnabled: testData.shuffleShardEnabled,
+					shuffleShardSize:    testData.shuffleShardSize,
+				})
+
+				// Push fixtures
+				ctx := user.InjectOrgID(context.Background(), "test")
+				ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter)
+
+				for _, series := range fixtures {
+					req := mockWriteRequestV2([]labels.Labels{series.lbls}, series.value, series.timestamp, histogram)
+					_, err := ds[0].PushV2(ctx, req)
+					require.NoError(t, err)
+				}
+
+				// Exercise the non-streaming API.
+				{
+					metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, testData.matchers...)
+
+					if testData.expectedErr != nil {
+						assert.ErrorIs(t, err, testData.expectedErr)
+						return
+					}
+
+					require.NoError(t, err)
+					assert.ElementsMatch(t, testData.expectedResult, metrics)
+
+					// Check how many ingesters have been queried.
+					// Due to the quorum the distributor could cancel the last request towards ingesters
+					// if all other ones are successful, so we're good either has been queried X or X-1
+					// ingesters.
+					assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchers"))
+				}
+
+				// Exercise the streaming API with the same expectations.
+				{
+					metrics, err := ds[0].MetricsForLabelMatchersStream(ctx, now, now, nil, testData.matchers...)
+					if testData.expectedErr != nil {
+						assert.ErrorIs(t, err, testData.expectedErr)
+						return
+					}
+
+					require.NoError(t, err)
+					assert.ElementsMatch(t, testData.expectedResult, metrics)
+
+					assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchersStream"))
+				}
+			})
+		}
+	}
+}
+
+// BenchmarkDistributorPRW2_MetricsForLabelMatchers measures MetricsForLabelMatchers
+// after pushing series through the remote-write v2 path; the series set is prepared
+// once outside the timed region.
+func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) {
+	const (
+		numIngesters        = 100
+		numSeriesPerRequest = 100
+	)
+
+	tests := map[string]struct {
+		prepareConfig func(limits *validation.Limits)
+		prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample)
+		matchers      []*labels.Matcher
+		queryLimiter  *limiter.QueryLimiter
+		expectedErr   error
+	}{
+		"get series within limits": {
+			prepareConfig: func(limits *validation.Limits) {},
+			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+				metrics := make([]labels.Labels, numSeriesPerRequest)
+				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+
+				for i := 0; i < numSeriesPerRequest; i++ {
+					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: fmt.Sprintf("foo_%d", i)}})
+					for i := 0; i < 10; i++ {
+						lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
+					}
+
+					metrics[i] = lbls.Labels()
+					samples[i] = cortexpbv2.Sample{
+						Value:     float64(i),
+						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					}
+				}
+
+				return metrics, samples
+			},
+			matchers: []*labels.Matcher{
+				mustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, "foo.+"),
+			},
+			queryLimiter: limiter.NewQueryLimiter(100, 0, 0, 0),
+			expectedErr:  nil,
+		},
+	}
+
+	for testName, testData := range tests {
+		b.Run(testName, func(b *testing.B) {
+			// Create distributor
+			ds, ingesters, _, _ := prepare(b, prepConfig{
+				numIngesters:        numIngesters,
+				happyIngesters:      numIngesters,
+				numDistributors:     1,
+				shardByAllLabels:    true,
+				shuffleShardEnabled: false,
+				shuffleShardSize:    0,
+			})
+
+			// Push fixtures
+			ctx := user.InjectOrgID(context.Background(), "test")
+			ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter)
+
+			// Prepare the series to remote write before starting the benchmark.
+			metrics, samples := testData.prepareSeries()
+
+			if _, err := ds[0].PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)); err != nil {
+				b.Fatalf("error pushing to distributor %v", err)
+			}
+
+			// Run the benchmark.
+			b.ReportAllocs()
+			b.ResetTimer()
+
+			for n := 0; n < b.N; n++ {
+				now := model.Now()
+				metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, testData.matchers...)
+
+				if testData.expectedErr != nil {
+					assert.EqualError(b, err, testData.expectedErr.Error())
+					return
+				}
+
+				require.NoError(b, err)
+
+				// Check how many ingesters have been queried.
+				// Due to the quorum the distributor could cancel the last request towards ingesters
+				// if all other ones are successful, so we're good either has been queried X or X-1
+				// ingesters.
+				assert.Contains(b, []int{numIngesters, numIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchers"))
+				assert.Equal(b, numSeriesPerRequest, len(metrics))
+			}
+		})
+	}
+}
+
+// TestDistributorPRW2_MetricsMetadata verifies that metadata pushed through the
+// remote-write v2 path is queryable via MetricsMetadata, and that shuffle sharding
+// bounds how many ingesters are queried.
+func TestDistributorPRW2_MetricsMetadata(t *testing.T) {
+	t.Parallel()
+	const numIngesters = 5
+
+	tests := map[string]struct {
+		shuffleShardEnabled bool
+		shuffleShardSize    int
+		expectedIngesters   int
+	}{
+		"should query all ingesters if shuffle sharding is disabled": {
+			shuffleShardEnabled: false,
+			expectedIngesters:   numIngesters,
+		},
+		"should query all ingesters if shuffle sharding is enabled but shard size is 0": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    0,
+			expectedIngesters:   numIngesters,
+		},
+		"should query only ingesters belonging to tenant's subring if shuffle sharding is enabled": {
+			shuffleShardEnabled: true,
+			shuffleShardSize:    3,
+			expectedIngesters:   3,
+		},
+	}
+
+	for testName, testData := range tests {
+		testData := testData
+		t.Run(testName, func(t *testing.T) {
+			t.Parallel()
+			// Create distributor
+			ds, ingesters, _, _ := prepare(t, prepConfig{
+				numIngesters:        numIngesters,
+				happyIngesters:      numIngesters,
+				numDistributors:     1,
+				shardByAllLabels:    true,
+				shuffleShardEnabled: testData.shuffleShardEnabled,
+				shuffleShardSize:    testData.shuffleShardSize,
+				limits:              nil,
+			})
+
+			// Push metadata
+			ctx := user.InjectOrgID(context.Background(), "test")
+
+			req := makeWriteRequestV2WithSamples(0, 0, 10)
+			_, err := ds[0].PushV2(ctx, req)
+			require.NoError(t, err)
+
+			// Assert on metric metadata
+			metadata, err := ds[0].MetricsMetadata(ctx)
+			require.NoError(t, err)
+			assert.Equal(t, 10, len(metadata))
+
+			// Check how many ingesters have been queried.
+			// Due to the quorum the distributor could cancel the last request towards ingesters
+			// if all other ones are successful, so we're good either has been queried X or X-1
+			// ingesters.
+			assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsMetadata"))
+		})
+	}
+}
+
+// makeWriteRequestV2WithHistogram builds a v2 write request containing `histogram`
+// native-histogram series starting at startTimestampMs. The first
+// min(histogram, metadata) of them also carry counter metadata; any remaining
+// metadata budget is emitted as metadata-only series, so the request carries
+// `metadata` metadata entries in total.
+func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, metadata int) *cortexpbv2.WriteRequest {
+	request := &cortexpbv2.WriteRequest{}
+	st := writev2.NewSymbolTable()
+	// Pre-intern the shared base labels so their refs are stable across series.
+	st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil)
+
+	for i := 0; i < histogram; i++ {
+		st.SymbolizeLabels(labels.Labels{{Name: "histogram", Value: fmt.Sprintf("%d", i)}}, nil)
+		request.Timeseries = append(request.Timeseries, makeTimeseriesV2FromST(
+			[]cortexpb.LabelAdapter{
+				{Name: model.MetricNameLabel, Value: "foo"},
+				{Name: "bar", Value: "baz"},
+				{Name: "histogram", Value: fmt.Sprintf("%d", i)},
+			}, &st, startTimestampMs+int64(i), i, true, i < metadata))
+	}
+
+	// Emit metadata-only series for whatever metadata budget the histogram
+	// series above did not cover.
+	for i := 0; i < metadata-histogram; i++ {
+		request.Timeseries = append(request.Timeseries, makeMetadataV2FromST(i, &st))
+	}
+
+	request.Symbols = st.Symbols()
+
+	return request
+}
+
+// makeMetadataV2FromST returns a metadata-only timeseries carrying counter-type
+// metadata whose help text is interned into st.
+// NOTE(review): LabelsRefs {1, 2} hard-codes the first two symbol refs, which
+// presumably point at the "__name__"/"foo" pair the caller interned first —
+// confirm against the caller's symbol-table setup.
+func makeMetadataV2FromST(value int, st *writev2.SymbolsTable) cortexpbv2.PreallocTimeseriesV2 {
+	t := cortexpbv2.PreallocTimeseriesV2{
+		TimeSeries: &cortexpbv2.TimeSeries{
+			LabelsRefs: []uint32{1, 2},
+		},
+	}
+	helpRef := st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
+	t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+	t.Metadata.HelpRef = helpRef
+
+	return t
+}
+
+// makeTimeseriesV2FromST builds a single v2 timeseries using the caller's shared
+// symbol table st: labels are interned into st, the sample carries (ts, value)
+// either as a native histogram (histogram=true) or a float sample, and counter
+// metadata with a generated help text is attached when metadata is true.
+func makeTimeseriesV2FromST(labels []cortexpb.LabelAdapter, st *writev2.SymbolsTable, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 {
+	var helpRef uint32
+	if metadata {
+		helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
+	}
+
+	t := cortexpbv2.PreallocTimeseriesV2{
+		TimeSeries: &cortexpbv2.TimeSeries{
+			LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil),
+		},
+	}
+	if metadata {
+		t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+		t.Metadata.HelpRef = helpRef
+	}
+
+	if histogram {
+		t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
+	} else {
+		t.Samples = append(t.Samples, cortexpbv2.Sample{
+			Timestamp: ts,
+			Value:     float64(value),
+		})
+	}
+
+	return t
+}
+
+// makeWriteRequestV2Timeseries builds a single v2 timeseries with its own private
+// symbol table: one float sample or native histogram at (ts, value), plus optional
+// counter metadata. Unlike makeTimeseriesV2FromST, the symbol table is not shared
+// with the caller, so the produced refs are only meaningful alongside this table.
+func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 {
+	st := writev2.NewSymbolTable()
+	// NOTE(review): this call's result is discarded; it only pre-interns the label
+	// symbols, and the identical call below produces the refs actually used.
+	st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil)
+
+	var helpRef uint32
+	if metadata {
+		helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
+	}
+
+	t := cortexpbv2.PreallocTimeseriesV2{
+		TimeSeries: &cortexpbv2.TimeSeries{
+			LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil),
+		},
+	}
+	if metadata {
+		t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+		t.Metadata.HelpRef = helpRef
+	}
+
+	if histogram {
+		t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
+	} else {
+		t.Samples = append(t.Samples, cortexpbv2.Sample{
+			Timestamp: ts,
+			Value:     float64(value),
+		})
+	}
+
+	return t
+}
+
+// makeWriteRequestV2Exemplar builds a v2 write request with a single series and a
+// single exemplar. seriesLabels and exemplarLabels are flat name/value pairs (as
+// accepted by labels.FromStrings); all strings are interned into one symbol table
+// and converted to label refs.
+func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplarLabels []string) *cortexpbv2.WriteRequest {
+	st := writev2.NewSymbolTable()
+	for _, l := range seriesLabels {
+		st.Symbolize(l)
+	}
+	for _, l := range exemplarLabels {
+		st.Symbolize(l)
+	}
+
+	return &cortexpbv2.WriteRequest{
+		Symbols: st.Symbols(),
+		Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+			{
+				TimeSeries: &cortexpbv2.TimeSeries{
+					LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(seriesLabels...))),
+					Exemplars: []cortexpbv2.Exemplar{
+						{
+							LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(exemplarLabels...))),
+							Timestamp:  timestamp,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// mockWriteRequestV2 builds a v2 write request with one sample per provided label
+// set — a native histogram when histogram is true, a float sample otherwise — all
+// sharing the same value and timestamp.
+func mockWriteRequestV2(lbls []labels.Labels, value int64, timestamp int64, histogram bool) *cortexpbv2.WriteRequest {
+	var (
+		samples    []cortexpbv2.Sample
+		histograms []cortexpbv2.Histogram
+	)
+	if histogram {
+		histograms = make([]cortexpbv2.Histogram, len(lbls))
+		for i := range lbls {
+			histograms[i] = cortexpbv2.HistogramToHistogramProto(timestamp, tsdbutil.GenerateTestHistogram(int(value)))
+		}
+	} else {
+		samples = make([]cortexpbv2.Sample, len(lbls))
+		for i := range lbls {
+			samples[i] = cortexpbv2.Sample{
+				Timestamp: timestamp,
+				Value:     float64(value),
+			}
+		}
+	}
+
+	return cortexpbv2.ToWriteRequestV2(lbls, samples, histograms, nil, cortexpbv2.API)
+}
+
+// makeWriteRequestHAV2 builds a v2 write request of `samples` series labeled with
+// the given HA __replica__ and cluster values (used by HA-tracker dedup tests);
+// each series carries one float sample or native histogram with i as both value
+// and timestamp.
+func makeWriteRequestHAV2(samples int, replica, cluster string, histogram bool) *cortexpbv2.WriteRequest {
+	request := &cortexpbv2.WriteRequest{}
+	st := writev2.NewSymbolTable()
+	for i := 0; i < samples; i++ {
+		ts := cortexpbv2.PreallocTimeseriesV2{
+			TimeSeries: &cortexpbv2.TimeSeries{
+				LabelsRefs: st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "__replica__", Value: replica}, {Name: "bar", Value: "baz"}, {Name: "cluster", Value: cluster}, {Name: "sample", Value: fmt.Sprintf("%d", i)}}, nil),
+			},
+		}
+		if histogram {
+			ts.Histograms = []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(int64(i), tsdbutil.GenerateTestHistogram(i)),
+			}
+		} else {
+			ts.Samples = []cortexpbv2.Sample{
+				{
+					Value:     float64(i),
+					Timestamp: int64(i),
+				},
+			}
+		}
+		request.Timeseries = append(request.Timeseries, ts)
+	}
+	request.Symbols = st.Symbols()
+	return request
+}
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index dce3d05c91..68d3353cf8 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -35,6 +35,7 @@ import (
 
 	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ha"
 	"github.com/cortexproject/cortex/pkg/ingester"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
@@ -3069,6 +3070,77 @@ func (i *mockIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr
 	return i.Push(ctx, &in.WriteRequest, opts...)
 }
 
+// PushPreAllocV2 unwraps a preallocated v2 write request and forwards it to PushV2.
+func (i *mockIngester) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	return i.PushV2(ctx, &in.WriteRequest, opts...)
+}
+
+// PushV2 records a remote-write v2 request into the mock ingester's in-memory,
+// v1-shaped state: float samples are converted to cortexpb samples keyed by the
+// series' shard hash, and per-series metadata is stored keyed by the metric
+// family's shard hash. Returns the stored failure error when the ingester is
+// marked unhappy.
+// NOTE(review): series.Histograms are not copied into the stored timeseries, and
+// the local variable `labels` shadows the labels package within the loop body.
+func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	i.Lock()
+	defer i.Unlock()
+
+	i.trackCall("PushV2")
+
+	if !i.happy.Load() {
+		return nil, i.failResp.Load()
+	}
+
+	// Lazily initialize the in-memory stores.
+	if i.timeseries == nil {
+		i.timeseries = map[uint32]*cortexpb.PreallocTimeseries{}
+	}
+
+	if i.metadata == nil {
+		i.metadata = map[uint32]map[cortexpb.MetricMetadata]struct{}{}
+	}
+
+	orgid, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	b := labels.NewScratchBuilder(0)
+
+	for _, series := range req.Timeseries {
+		// Resolve symbol refs back into concrete labels.
+		tsLabels := series.ToLabels(&b, req.Symbols)
+		labels := cortexpb.FromLabelsToLabelAdapters(tsLabels)
+		hash := shardByAllLabels(orgid, labels)
+		existing, ok := i.timeseries[hash]
+		var v1Sample []cortexpb.Sample
+		for _, s := range series.Samples {
+			v1Sample = append(v1Sample, cortexpb.Sample{
+				Value:       s.Value,
+				TimestampMs: s.Timestamp,
+			})
+		}
+		if !ok {
+			// Make a copy because the request Timeseries are reused
+			item := cortexpb.TimeSeries{
+				Labels:  make([]cortexpb.LabelAdapter, len(labels)),
+				Samples: make([]cortexpb.Sample, len(v1Sample)),
+			}
+
+			copy(item.Labels, labels)
+			copy(item.Samples, v1Sample)
+
+			i.timeseries[hash] = &cortexpb.PreallocTimeseries{TimeSeries: &item}
+		} else {
+			existing.Samples = append(existing.Samples, v1Sample...)
+		}
+
+		// Store v1-converted metadata only when the series actually carries some.
+		if series.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+			m := series.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols)
+			hash = shardByMetricName(orgid, m.MetricFamilyName)
+			set, ok := i.metadata[hash]
+			if !ok {
+				set = map[cortexpb.MetricMetadata]struct{}{}
+				i.metadata[hash] = set
+			}
+			set[*m] = struct{}{}
+		}
+	}
+
+	return &cortexpbv2.WriteResponse{}, nil
+}
+
 func (i *mockIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
 	i.Lock()
 	defer i.Unlock()
@@ -3319,6 +3391,10 @@ func (i *noopIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr
 	return nil, nil
 }
 
+// PushV2 implements the v2 push API as a no-op for the noop ingester.
+func (i *noopIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	return nil, nil
+}
+
 func (i *noopIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
 	return nil, nil
 }
diff --git a/pkg/distributor/distributorpb/distributor.pb.go b/pkg/distributor/distributorpb/distributor.pb.go
index 9711c9efe6..5946061f32 100644
--- a/pkg/distributor/distributorpb/distributor.pb.go
+++ b/pkg/distributor/distributorpb/distributor.pb.go
@@ -7,6 +7,7 @@ import (
 	context "context"
 	fmt "fmt"
 	cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
+	cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	grpc "google.golang.org/grpc"
@@ -29,21 +30,23 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 func init() { proto.RegisterFile("distributor.proto", fileDescriptor_c518e33639ca565d) }
 
 var fileDescriptor_c518e33639ca565d = []byte{
-	// 212 bytes of a gzipped FileDescriptorProto
+	// 245 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xc9, 0x2c, 0x2e,
 	0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46,
 	0x12, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xeb, 0x83, 0x58, 0x10, 0x25, 0x52, 0x96,
 	0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xc9, 0xf9, 0x45, 0x25, 0xa9,
 	0x15, 0x05, 0x45, 0xf9, 0x59, 0xa9, 0xc9, 0x25, 0x50, 0x9e, 0x7e, 0x41, 0x76, 0x3a, 0x4c, 0x22,
-	0x09, 0xca, 0x80, 0x68, 0x35, 0xf2, 0xe0, 0xe2, 0x76, 0x41, 0x98, 0x2f, 0x64, 0xc9, 0xc5, 0x12,
-	0x50, 0x5a, 0x9c, 0x21, 0x24, 0xa6, 0x07, 0x53, 0xae, 0x17, 0x5e, 0x94, 0x59, 0x92, 0x1a, 0x94,
-	0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, 0x8e, 0x21, 0x5e, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0xaa,
-	0xc4, 0xe0, 0xe4, 0x7c, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31,
-	0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4,
-	0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x09, 0x8f,
-	0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x8a, 0x17, 0xc9, 0x77, 0x05,
-	0x49, 0x49, 0x6c, 0x60, 0x57, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x5b, 0x37, 0x6f,
-	0x08, 0x01, 0x00, 0x00,
+	0x09, 0xca, 0x80, 0x6a, 0xb5, 0x25, 0x45, 0x6b, 0x99, 0x11, 0x94, 0x59, 0x66, 0x04, 0xd1, 0x6e,
+	0xd4, 0xc9, 0xc8, 0xc5, 0xed, 0x82, 0x70, 0x9f, 0x90, 0x25, 0x17, 0x4b, 0x40, 0x69, 0x71, 0x86,
+	0x90, 0x98, 0x1e, 0x4c, 0x8f, 0x5e, 0x78, 0x51, 0x66, 0x49, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a,
+	0x71, 0x89, 0x94, 0x38, 0x86, 0x78, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x90, 0x3d,
+	0x17, 0x1b, 0x48, 0x6b, 0x98, 0x91, 0x90, 0x84, 0x1e, 0xc2, 0x42, 0x54, 0xed, 0x92, 0x58, 0x64,
+	0x60, 0x06, 0x38, 0x39, 0x5f, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72,
+	0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23,
+	0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2,
+	0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x45, 0x0a, 0xde,
+	0x82, 0xa4, 0x24, 0x36, 0xb0, 0xbf, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x56, 0x3b,
+	0xcc, 0x89, 0x01, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -59,6 +62,7 @@ const _ = grpc.SupportPackageIsVersion4
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type DistributorClient interface {
 	Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+	PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
 }
 
 type distributorClient struct {
@@ -78,9 +82,19 @@ func (c *distributorClient) Push(ctx context.Context, in *cortexpb.WriteRequest,
 	return out, nil
 }
 
+func (c *distributorClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	out := new(cortexpbv2.WriteResponse)
+	err := c.cc.Invoke(ctx, "/distributor.Distributor/PushV2", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // DistributorServer is the server API for Distributor service.
 type DistributorServer interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
+	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
 }
 
 // UnimplementedDistributorServer can be embedded to have forward compatible implementations.
@@ -90,6 +104,9 @@ type UnimplementedDistributorServer struct {
 func (*UnimplementedDistributorServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method Push not implemented")
 }
+func (*UnimplementedDistributorServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented")
+}
 
 func RegisterDistributorServer(s *grpc.Server, srv DistributorServer) {
 	s.RegisterService(&_Distributor_serviceDesc, srv)
@@ -113,6 +130,24 @@ func _Distributor_Push_Handler(srv interface{}, ctx context.Context, dec func(in
 	return interceptor(ctx, in, info, handler)
 }
 
+func _Distributor_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(cortexpbv2.WriteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(DistributorServer).PushV2(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/distributor.Distributor/PushV2",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(DistributorServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _Distributor_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "distributor.Distributor",
 	HandlerType: (*DistributorServer)(nil),
@@ -121,6 +156,10 @@ var _Distributor_serviceDesc = grpc.ServiceDesc{
 			MethodName: "Push",
 			Handler:    _Distributor_Push_Handler,
 		},
+		{
+			MethodName: "PushV2",
+			Handler:    _Distributor_PushV2_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "distributor.proto",
diff --git a/pkg/distributor/distributorpb/distributor.proto b/pkg/distributor/distributorpb/distributor.proto
index 5319ba44a1..93234f1abb 100644
--- a/pkg/distributor/distributorpb/distributor.proto
+++ b/pkg/distributor/distributorpb/distributor.proto
@@ -6,10 +6,12 @@ option go_package = "distributorpb";
 
 import "gogoproto/gogo.proto";
 import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto";
+import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service Distributor {
   rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
+  rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {};
 }
diff --git a/pkg/distributor/stats.go b/pkg/distributor/stats.go
new file mode 100644
index 0000000000..0f7fbc332d
--- /dev/null
+++ b/pkg/distributor/stats.go
@@ -0,0 +1,62 @@
+package distributor
+
+import (
+	"go.uber.org/atomic"
+)
+
+type WriteStats struct {
+	// Samples represents X-Prometheus-Remote-Write-Written-Samples
+	Samples atomic.Int64
+	// Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+	Histograms atomic.Int64
+	// Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+	Exemplars atomic.Int64
+}
+
+func (w *WriteStats) SetSamples(samples int64) {
+	if w == nil {
+		return
+	}
+
+	w.Samples.Store(samples)
+}
+
+func (w *WriteStats) SetHistograms(histograms int64) {
+	if w == nil {
+		return
+	}
+
+	w.Histograms.Store(histograms)
+}
+
+func (w *WriteStats) SetExemplars(exemplars int64) {
+	if w == nil {
+		return
+	}
+
+	w.Exemplars.Store(exemplars)
+}
+
+func (w *WriteStats) LoadSamples() int64 {
+	if w == nil {
+		return 0
+	}
+
+	return w.Samples.Load()
+}
+
+func (w *WriteStats) LoadHistogram() int64 {
+	if w == nil {
+		return 0
+	}
+
+	return w.Histograms.Load()
+}
+
+func (w *WriteStats) LoadExemplars() int64 {
+	if w == nil {
+		return 0
+	}
+
+	return w.Exemplars.Load()
+}
diff --git a/pkg/distributor/stats_test.go b/pkg/distributor/stats_test.go
new file mode 100644
index 0000000000..10f0bf87b2
--- /dev/null
+++ b/pkg/distributor/stats_test.go
@@ -0,0 +1,41 @@
+package distributor
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_SetAndLoad(t *testing.T) {
+	s := &WriteStats{}
+
+	t.Run("Samples", func(t *testing.T) {
+		s.SetSamples(3)
+		assert.Equal(t, int64(3), s.LoadSamples())
+	})
+	t.Run("Histograms", func(t *testing.T) {
+		s.SetHistograms(10)
+		assert.Equal(t, int64(10), s.LoadHistogram())
+	})
+	t.Run("Exemplars", func(t *testing.T) {
+		s.SetExemplars(2)
+		assert.Equal(t, int64(2), s.LoadExemplars())
+	})
+}
+
+func Test_NilReceiver(t *testing.T) {
+	var s *WriteStats
+
+	t.Run("Samples", func(t *testing.T) {
+		s.SetSamples(3)
+		assert.Equal(t, int64(0), s.LoadSamples())
+	})
+	t.Run("Histograms", func(t *testing.T) {
+		s.SetHistograms(10)
+		assert.Equal(t, int64(0), s.LoadHistogram())
+	})
+	t.Run("Exemplars", func(t *testing.T) {
+		s.SetExemplars(2)
+		assert.Equal(t, int64(0), s.LoadExemplars())
+	})
+}
diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go
index b1c5a8b28a..b8b3ddefe9 100644
--- a/pkg/ingester/client/client.go
+++ b/pkg/ingester/client/client.go
@@ -5,6 +5,7 @@ import (
 	"flag"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util/grpcclient"
 	"github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock"
 
@@ -43,6 +44,7 @@ type HealthAndIngesterClient interface {
 	grpc_health_v1.HealthClient
 	Close() error
 	PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+	PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
 }
 
 type closableHealthAndIngesterClient struct {
@@ -55,6 +57,17 @@ type closableHealthAndIngesterClient struct {
 	inflightPushRequests    *prometheus.GaugeVec
 }
 
+func (c *closableHealthAndIngesterClient) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	return c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) {
+		out := new(cortexpbv2.WriteResponse)
+		err := c.conn.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...)
+		if err != nil {
+			return nil, err
+		}
+		return out, nil
+	})
+}
+
 func (c *closableHealthAndIngesterClient) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
 	return c.handlePushRequest(func() (*cortexpb.WriteResponse, error) {
 		out := new(cortexpb.WriteResponse)
@@ -72,6 +85,24 @@ func (c *closableHealthAndIngesterClient) Push(ctx context.Context, in *cortexpb
 	})
 }
 
+func (c *closableHealthAndIngesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	return c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) {
+		return c.IngesterClient.PushV2(ctx, in, opts...)
+	})
+}
+
+func (c *closableHealthAndIngesterClient) handlePushRequestV2(mainFunc func() (*cortexpbv2.WriteResponse, error)) (*cortexpbv2.WriteResponse, error) {
+	currentInflight := c.inflightRequests.Inc()
+	c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(currentInflight))
+	defer func() {
+		c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(c.inflightRequests.Dec()))
+	}()
+	if c.maxInflightPushRequests > 0 && currentInflight > c.maxInflightPushRequests {
+		return nil, errTooManyInflightPushRequests
+	}
+	return mainFunc()
+}
+
 func (c *closableHealthAndIngesterClient) handlePushRequest(mainFunc func() (*cortexpb.WriteResponse, error)) (*cortexpb.WriteResponse, error) {
 	currentInflight := c.inflightRequests.Inc()
 	c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(currentInflight))
diff --git a/pkg/ingester/client/cortex_mock_test.go b/pkg/ingester/client/cortex_mock_test.go
index fd98c77082..5aff8bf302 100644
--- a/pkg/ingester/client/cortex_mock_test.go
+++ b/pkg/ingester/client/cortex_mock_test.go
@@ -6,12 +6,18 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 type IngesterServerMock struct {
 	mock.Mock
 }
 
+func (m *IngesterServerMock) PushV2(ctx context.Context, r *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	args := m.Called(ctx, r)
+	return args.Get(0).(*cortexpbv2.WriteResponse), args.Error(1)
+}
+
 func (m *IngesterServerMock) Push(ctx context.Context, r *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	args := m.Called(ctx, r)
 	return args.Get(0).(*cortexpb.WriteResponse), args.Error(1)
diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go
index 374348afae..e587962642 100644
--- a/pkg/ingester/client/ingester.pb.go
+++ b/pkg/ingester/client/ingester.pb.go
@@ -12,6 +12,7 @@ import (
 	fmt "fmt"
 	cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
 	github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
+	cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	grpc "google.golang.org/grpc"
@@ -1484,91 +1485,93 @@ func init() {
 func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) }
 
 var fileDescriptor_60f6df4f3586b478 = []byte{
-	// 1339 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0x14, 0xc7,
-	0x13, 0xdf, 0xf1, 0x3e, 0xec, 0xad, 0x7d, 0xb0, 0x6e, 0x1b, 0xbc, 0x0c, 0x7f, 0xc6, 0x30, 0x88,
-	0x7f, 0xac, 0x24, 0xd8, 0xe0, 0x24, 0x12, 0xe4, 0x85, 0x6c, 0x30, 0x60, 0xc0, 0x18, 0xc6, 0x86,
-	0x44, 0x51, 0xa2, 0xd1, 0x78, 0xb7, 0xb1, 0x27, 0xcc, 0x63, 0x99, 0xee, 0x41, 0x90, 0x53, 0xa2,
-	0x7c, 0x80, 0xe4, 0x98, 0x6b, 0x6e, 0xf9, 0x00, 0xf9, 0x10, 0x1c, 0x39, 0xe4, 0x80, 0x72, 0x40,
-	0x61, 0x91, 0xa2, 0x1c, 0xc9, 0x37, 0x88, 0xa6, 0x1f, 0xf3, 0xf2, 0xf8, 0x41, 0x04, 0xb9, 0xed,
-	0x54, 0xfd, 0xaa, 0xba, 0xea, 0xd7, 0x55, 0x5d, 0xb5, 0xd0, 0xb6, 0xbd, 0x4d, 0x4c, 0x28, 0x0e,
-	0x66, 0x07, 0x81, 0x4f, 0x7d, 0x54, 0xeb, 0xf9, 0x01, 0xc5, 0x0f, 0xd5, 0xc9, 0x4d, 0x7f, 0xd3,
-	0x67, 0xa2, 0xb9, 0xe8, 0x17, 0xd7, 0xaa, 0xe7, 0x36, 0x6d, 0xba, 0x15, 0x6e, 0xcc, 0xf6, 0x7c,
-	0x77, 0x8e, 0x03, 0x07, 0x81, 0xff, 0x35, 0xee, 0x51, 0xf1, 0x35, 0x37, 0xb8, 0xb7, 0x29, 0x15,
-	0x1b, 0xe2, 0x07, 0x37, 0xd5, 0x3f, 0x81, 0x86, 0x81, 0xad, 0xbe, 0x81, 0xef, 0x87, 0x98, 0x50,
-	0x34, 0x0b, 0xa3, 0xf7, 0x43, 0x1c, 0xd8, 0x98, 0x74, 0x95, 0x63, 0xe5, 0x99, 0xc6, 0xfc, 0xe4,
-	0xac, 0x80, 0xdf, 0x0a, 0x71, 0xf0, 0x48, 0xc0, 0x0c, 0x09, 0xd2, 0xcf, 0x43, 0x93, 0x9b, 0x93,
-	0x81, 0xef, 0x11, 0x8c, 0xe6, 0x60, 0x34, 0xc0, 0x24, 0x74, 0xa8, 0xb4, 0x3f, 0x98, 0xb3, 0xe7,
-	0x38, 0x43, 0xa2, 0xf4, 0x6b, 0xd0, 0xca, 0x68, 0xd0, 0x87, 0x00, 0xd4, 0x76, 0x31, 0x29, 0x0a,
-	0x62, 0xb0, 0x31, 0xbb, 0x6e, 0xbb, 0x78, 0x8d, 0xe9, 0x16, 0x2b, 0x8f, 0x9f, 0x4d, 0x97, 0x8c,
-	0x14, 0x5a, 0xff, 0x49, 0x81, 0x66, 0x3a, 0x4e, 0xf4, 0x2e, 0x20, 0x42, 0xad, 0x80, 0x9a, 0x0c,
-	0x44, 0x2d, 0x77, 0x60, 0xba, 0x91, 0x53, 0x65, 0xa6, 0x6c, 0x74, 0x98, 0x66, 0x5d, 0x2a, 0x56,
-	0x08, 0x9a, 0x81, 0x0e, 0xf6, 0xfa, 0x59, 0xec, 0x08, 0xc3, 0xb6, 0xb1, 0xd7, 0x4f, 0x23, 0x4f,
-	0xc3, 0x98, 0x6b, 0xd1, 0xde, 0x16, 0x0e, 0x48, 0xb7, 0x9c, 0xe5, 0xe9, 0xba, 0xb5, 0x81, 0x9d,
-	0x15, 0xae, 0x34, 0x62, 0x94, 0xfe, 0xb3, 0x02, 0x93, 0x4b, 0x0f, 0xb1, 0x3b, 0x70, 0xac, 0xe0,
-	0x3f, 0x09, 0xf1, 0xcc, 0xb6, 0x10, 0x0f, 0x16, 0x85, 0x48, 0x52, 0x31, 0x7e, 0x09, 0x13, 0x2c,
-	0xb4, 0x35, 0x1a, 0x60, 0xcb, 0x8d, 0x6f, 0xe4, 0x3c, 0x34, 0x7a, 0x5b, 0xa1, 0x77, 0x2f, 0x73,
-	0x25, 0x53, 0xd2, 0x59, 0x72, 0x21, 0x17, 0x22, 0x90, 0xb8, 0x95, 0xb4, 0xc5, 0xd5, 0xca, 0xd8,
-	0x48, 0xa7, 0xac, 0xaf, 0xc1, 0xc1, 0x1c, 0x01, 0xaf, 0xe1, 0xc6, 0x7f, 0x53, 0x00, 0xb1, 0x74,
-	0xee, 0x58, 0x4e, 0x88, 0x89, 0x24, 0xf5, 0x28, 0x80, 0x13, 0x49, 0x4d, 0xcf, 0x72, 0x31, 0x23,
-	0xb3, 0x6e, 0xd4, 0x99, 0xe4, 0x86, 0xe5, 0xe2, 0x1d, 0x38, 0x1f, 0x79, 0x05, 0xce, 0xcb, 0x7b,
-	0x72, 0x5e, 0x39, 0xa6, 0xec, 0x83, 0x73, 0x34, 0x09, 0x55, 0xc7, 0x76, 0x6d, 0xda, 0xad, 0x32,
-	0x8f, 0xfc, 0x43, 0x3f, 0x0b, 0x13, 0x99, 0xac, 0x04, 0x53, 0xc7, 0xa1, 0xc9, 0xd3, 0x7a, 0xc0,
-	0xe4, 0x8c, 0xab, 0xba, 0xd1, 0x70, 0x12, 0xa8, 0xfe, 0x29, 0x1c, 0x4e, 0x59, 0xe6, 0x6e, 0x72,
-	0x1f, 0xf6, 0xbf, 0x2a, 0x30, 0x7e, 0x5d, 0x12, 0x45, 0xde, 0x74, 0x91, 0xc6, 0xd9, 0x97, 0x53,
-	0xd9, 0xff, 0x0b, 0x1a, 0xf5, 0x0f, 0x44, 0x19, 0x88, 0xa8, 0x45, 0xbe, 0xd3, 0xd0, 0x48, 0xca,
-	0x40, 0xa6, 0x0b, 0x71, 0x1d, 0x10, 0xfd, 0x23, 0xe8, 0x26, 0x66, 0x39, 0xb2, 0xf6, 0x34, 0x46,
-	0xd0, 0xb9, 0x4d, 0x70, 0xb0, 0x46, 0x2d, 0x2a, 0x89, 0xd2, 0xbf, 0x1b, 0x81, 0xf1, 0x94, 0x50,
-	0xb8, 0x3a, 0x29, 0xdf, 0x73, 0xdb, 0xf7, 0xcc, 0xc0, 0xa2, 0xbc, 0x24, 0x15, 0xa3, 0x15, 0x4b,
-	0x0d, 0x8b, 0xe2, 0xa8, 0x6a, 0xbd, 0xd0, 0x35, 0x45, 0x23, 0x44, 0x8c, 0x55, 0x8c, 0xba, 0x17,
-	0xba, 0xbc, 0xfa, 0xa3, 0x4b, 0xb0, 0x06, 0xb6, 0x99, 0xf3, 0x54, 0x66, 0x9e, 0x3a, 0xd6, 0xc0,
-	0x5e, 0xce, 0x38, 0x9b, 0x85, 0x89, 0x20, 0x74, 0x70, 0x1e, 0x5e, 0x61, 0xf0, 0xf1, 0x48, 0x95,
-	0xc5, 0x9f, 0x80, 0x96, 0xd5, 0xa3, 0xf6, 0x03, 0x2c, 0xcf, 0xaf, 0xb2, 0xf3, 0x9b, 0x5c, 0x28,
-	0x42, 0x38, 0x01, 0x2d, 0xc7, 0xb7, 0xfa, 0xb8, 0x6f, 0x6e, 0x38, 0x7e, 0xef, 0x1e, 0xe9, 0xd6,
-	0x38, 0x88, 0x0b, 0x17, 0x99, 0x4c, 0xff, 0x0a, 0x26, 0x22, 0x0a, 0x96, 0x2f, 0x66, 0x49, 0x98,
-	0x82, 0xd1, 0x90, 0xe0, 0xc0, 0xb4, 0xfb, 0xa2, 0x21, 0x6b, 0xd1, 0xe7, 0x72, 0x1f, 0x9d, 0x82,
-	0x4a, 0xdf, 0xa2, 0x16, 0x4b, 0xb8, 0x31, 0x7f, 0x58, 0x5e, 0xf5, 0x36, 0x1a, 0x0d, 0x06, 0xd3,
-	0x2f, 0x03, 0x8a, 0x54, 0x24, 0xeb, 0xfd, 0x0c, 0x54, 0x49, 0x24, 0x10, 0xef, 0xc7, 0x91, 0xb4,
-	0x97, 0x5c, 0x24, 0x06, 0x47, 0xea, 0x8f, 0x15, 0xd0, 0x56, 0x30, 0x0d, 0xec, 0x1e, 0xb9, 0xe4,
-	0x07, 0xd9, 0xca, 0x7a, 0xc3, 0x75, 0x7f, 0x16, 0x9a, 0xb2, 0x74, 0x4d, 0x82, 0xe9, 0xee, 0x0f,
-	0x74, 0x43, 0x42, 0xd7, 0x30, 0x4d, 0x3a, 0xa6, 0x92, 0x7e, 0x2f, 0xae, 0xc1, 0xf4, 0x8e, 0x99,
-	0x08, 0x82, 0x66, 0xa0, 0xe6, 0x32, 0x88, 0x60, 0xa8, 0x93, 0xbc, 0xb0, 0xdc, 0xd4, 0x10, 0x7a,
-	0xfd, 0x16, 0x9c, 0xdc, 0xc1, 0x59, 0xae, 0x43, 0xf6, 0xef, 0xb2, 0x0b, 0x87, 0x84, 0xcb, 0x15,
-	0x4c, 0xad, 0xe8, 0x1a, 0x65, 0xc3, 0xac, 0xc2, 0xd4, 0x36, 0x8d, 0x70, 0xff, 0x3e, 0x8c, 0xb9,
-	0x42, 0x26, 0x0e, 0xe8, 0xe6, 0x0f, 0x88, 0x6d, 0x62, 0xa4, 0xfe, 0xb7, 0x02, 0x07, 0x72, 0x33,
-	0x29, 0xba, 0x98, 0xbb, 0x81, 0xef, 0x9a, 0x72, 0xa9, 0x4a, 0x6a, 0xb0, 0x1d, 0xc9, 0x97, 0x85,
-	0x78, 0xb9, 0x9f, 0x2e, 0xd2, 0x91, 0x4c, 0x91, 0x7a, 0x50, 0x63, 0xad, 0x2f, 0x87, 0xe9, 0x44,
-	0x12, 0x0a, 0xa3, 0xe8, 0xa6, 0x65, 0x07, 0x8b, 0x0b, 0xd1, 0x7c, 0xfa, 0xfd, 0xd9, 0xf4, 0x2b,
-	0xed, 0x63, 0xdc, 0x7e, 0xa1, 0x6f, 0x0d, 0x28, 0x0e, 0x0c, 0x71, 0x0a, 0x7a, 0x07, 0x6a, 0x7c,
-	0x84, 0x76, 0x2b, 0xec, 0xbc, 0x96, 0xac, 0x8d, 0xf4, 0x94, 0x15, 0x10, 0xfd, 0x07, 0x05, 0xaa,
-	0x3c, 0xd3, 0x37, 0x55, 0xb0, 0x2a, 0x8c, 0x61, 0xaf, 0xe7, 0xf7, 0x6d, 0x6f, 0x93, 0xbd, 0x38,
-	0x55, 0x23, 0xfe, 0x46, 0x48, 0xf4, 0x6f, 0x54, 0x91, 0x4d, 0xd1, 0xa4, 0x0b, 0xd0, 0xca, 0x54,
-	0x4e, 0x66, 0x63, 0x52, 0xf6, 0xb5, 0x31, 0x99, 0xd0, 0x4c, 0x6b, 0xd0, 0x49, 0xa8, 0xd0, 0x47,
-	0x03, 0xfe, 0x74, 0xb6, 0xe7, 0xc7, 0xa5, 0x35, 0x53, 0xaf, 0x3f, 0x1a, 0x60, 0x83, 0xa9, 0xa3,
-	0x68, 0xd8, 0xd0, 0xe7, 0xd7, 0xc7, 0x7e, 0x47, 0x4d, 0xc3, 0x26, 0x1e, 0x0b, 0xbd, 0x6e, 0xf0,
-	0x0f, 0xfd, 0x7b, 0x05, 0xda, 0x49, 0xa5, 0x5c, 0xb2, 0x1d, 0xfc, 0x3a, 0x0a, 0x45, 0x85, 0xb1,
-	0xbb, 0xb6, 0x83, 0x59, 0x0c, 0xfc, 0xb8, 0xf8, 0xbb, 0x88, 0xa9, 0xb7, 0xaf, 0x42, 0x3d, 0x4e,
-	0x01, 0xd5, 0xa1, 0xba, 0x74, 0xeb, 0xf6, 0xc2, 0xf5, 0x4e, 0x09, 0xb5, 0xa0, 0x7e, 0x63, 0x75,
-	0xdd, 0xe4, 0x9f, 0x0a, 0x3a, 0x00, 0x0d, 0x63, 0xe9, 0xf2, 0xd2, 0xe7, 0xe6, 0xca, 0xc2, 0xfa,
-	0x85, 0x2b, 0x9d, 0x11, 0x84, 0xa0, 0xcd, 0x05, 0x37, 0x56, 0x85, 0xac, 0x3c, 0xff, 0xe7, 0x28,
-	0x8c, 0xc9, 0x18, 0xd1, 0x39, 0xa8, 0xdc, 0x0c, 0xc9, 0x16, 0x3a, 0x94, 0x54, 0xea, 0x67, 0x81,
-	0x4d, 0xb1, 0xe8, 0x3c, 0x75, 0x6a, 0x9b, 0x9c, 0xf7, 0x9d, 0x5e, 0x42, 0x17, 0xa1, 0x91, 0x5a,
-	0x04, 0x51, 0xe1, 0x7f, 0x00, 0xf5, 0x48, 0x46, 0x9a, 0x7d, 0x1a, 0xf4, 0xd2, 0x69, 0x05, 0xad,
-	0x42, 0x9b, 0xa9, 0xe4, 0xd6, 0x47, 0xd0, 0xff, 0xa4, 0x49, 0xd1, 0x26, 0xac, 0x1e, 0xdd, 0x41,
-	0x1b, 0x87, 0x75, 0x05, 0x1a, 0xa9, 0xdd, 0x06, 0xa9, 0x99, 0x02, 0xca, 0x2c, 0x80, 0x49, 0x70,
-	0x05, 0x6b, 0x94, 0x5e, 0x42, 0x77, 0xc4, 0x92, 0x93, 0xde, 0x92, 0x76, 0xf5, 0x77, 0xbc, 0x40,
-	0x57, 0x90, 0xf2, 0x12, 0x40, 0xb2, 0x4f, 0xa0, 0xc3, 0x19, 0xa3, 0xf4, 0x42, 0xa5, 0xaa, 0x45,
-	0xaa, 0x38, 0xbc, 0x35, 0xe8, 0xe4, 0xd7, 0x92, 0xdd, 0x9c, 0x1d, 0xdb, 0xae, 0x2a, 0x88, 0x6d,
-	0x11, 0xea, 0xf1, 0x48, 0x45, 0xdd, 0x82, 0x29, 0xcb, 0x9d, 0xed, 0x3c, 0x7f, 0xf5, 0x12, 0xba,
-	0x04, 0xcd, 0x05, 0xc7, 0xd9, 0x8f, 0x1b, 0x35, 0xad, 0x21, 0x79, 0x3f, 0x4e, 0xfc, 0xea, 0xe7,
-	0x47, 0x0c, 0xfa, 0x7f, 0xdc, 0xd8, 0xbb, 0x8e, 0x66, 0xf5, 0xad, 0x3d, 0x71, 0xf1, 0x69, 0xdf,
-	0xc0, 0xd1, 0x5d, 0x07, 0xda, 0xbe, 0xcf, 0x3c, 0xb5, 0x07, 0xae, 0x80, 0xf5, 0x75, 0x38, 0x90,
-	0x9b, 0x6f, 0x48, 0xcb, 0x79, 0xc9, 0x8d, 0x44, 0x75, 0x7a, 0x47, 0xbd, 0xf4, 0xbb, 0xf8, 0xf1,
-	0x93, 0xe7, 0x5a, 0xe9, 0xe9, 0x73, 0xad, 0xf4, 0xf2, 0xb9, 0xa6, 0x7c, 0x3b, 0xd4, 0x94, 0x5f,
-	0x86, 0x9a, 0xf2, 0x78, 0xa8, 0x29, 0x4f, 0x86, 0x9a, 0xf2, 0xc7, 0x50, 0x53, 0xfe, 0x1a, 0x6a,
-	0xa5, 0x97, 0x43, 0x4d, 0xf9, 0xf1, 0x85, 0x56, 0x7a, 0xf2, 0x42, 0x2b, 0x3d, 0x7d, 0xa1, 0x95,
-	0xbe, 0xa8, 0xf5, 0x1c, 0x1b, 0x7b, 0x74, 0xa3, 0xc6, 0xfe, 0xfa, 0xbf, 0xf7, 0x4f, 0x00, 0x00,
-	0x00, 0xff, 0xff, 0x84, 0xf7, 0x8d, 0x61, 0x65, 0x10, 0x00, 0x00,
+	// 1368 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0x14, 0x55,
+	0x14, 0x9e, 0xce, 0x3c, 0x98, 0x39, 0xf3, 0x60, 0x72, 0x13, 0xc8, 0xa4, 0x91, 0x0e, 0x34, 0x85,
+	0xa6, 0x54, 0x26, 0x30, 0x6a, 0x15, 0xa8, 0x48, 0x25, 0x10, 0x20, 0x40, 0x08, 0x74, 0x02, 0x5a,
+	0x96, 0x56, 0x57, 0x67, 0xe6, 0x92, 0xb4, 0xf4, 0x63, 0xe8, 0x7b, 0x27, 0x05, 0xae, 0xb4, 0xfc,
+	0x01, 0xba, 0x74, 0xeb, 0x4a, 0x7f, 0x80, 0x3f, 0x82, 0x25, 0x0b, 0x17, 0x94, 0x0b, 0x4a, 0x86,
+	0x8d, 0x4b, 0xfc, 0x07, 0x56, 0xdf, 0x47, 0xbf, 0x32, 0x79, 0x60, 0x81, 0xbb, 0xbe, 0xe7, 0x7c,
+	0xe7, 0xdc, 0x73, 0xbf, 0x7b, 0xce, 0x3d, 0x67, 0x06, 0x1a, 0xb6, 0xb7, 0x81, 0x09, 0xc5, 0x41,
+	0xbb, 0x1f, 0xf8, 0xd4, 0x47, 0xa5, 0xae, 0x1f, 0x50, 0xfc, 0x50, 0x9d, 0xdc, 0xf0, 0x37, 0x7c,
+	0x26, 0x9a, 0x0b, 0xbf, 0xb8, 0x56, 0x3d, 0xb7, 0x61, 0xd3, 0xcd, 0xc1, 0x7a, 0xbb, 0xeb, 0xbb,
+	0x73, 0x1c, 0xd8, 0x0f, 0xfc, 0x6f, 0x70, 0x97, 0x8a, 0xd5, 0x5c, 0xff, 0xfe, 0x86, 0x54, 0xac,
+	0x8b, 0x0f, 0x61, 0x7a, 0xfe, 0x55, 0x4c, 0xb7, 0x3a, 0xe2, 0x73, 0xab, 0xc3, 0xcd, 0xf5, 0xf3,
+	0x50, 0x35, 0xb0, 0xd5, 0x33, 0xf0, 0x83, 0x01, 0x26, 0x14, 0xb5, 0xe1, 0xc0, 0x83, 0x01, 0x0e,
+	0x6c, 0x4c, 0x5a, 0xca, 0xb1, 0xfc, 0x6c, 0xb5, 0x33, 0xd9, 0x16, 0xbb, 0xdd, 0x1e, 0xe0, 0xe0,
+	0x91, 0x80, 0x19, 0x12, 0xa4, 0x5f, 0x80, 0x1a, 0x37, 0x27, 0x7d, 0xdf, 0x23, 0x18, 0xcd, 0xc1,
+	0x81, 0x00, 0x93, 0x81, 0x43, 0xa5, 0xfd, 0xa1, 0x8c, 0x3d, 0xc7, 0x19, 0x12, 0xa5, 0x5f, 0x87,
+	0x7a, 0x4a, 0x83, 0x3e, 0x06, 0xa0, 0xb6, 0x8b, 0xc9, 0xa8, 0x20, 0xfa, 0xeb, 0xed, 0x35, 0xdb,
+	0xc5, 0xab, 0x4c, 0xb7, 0x50, 0x78, 0xfc, 0x6c, 0x26, 0x67, 0x24, 0xd0, 0xfa, 0xcf, 0x0a, 0xd4,
+	0x92, 0x71, 0xa2, 0xf7, 0x01, 0x11, 0x6a, 0x05, 0xd4, 0x64, 0x20, 0x6a, 0xb9, 0x7d, 0xd3, 0x0d,
+	0x9d, 0x2a, 0xb3, 0x79, 0xa3, 0xc9, 0x34, 0x6b, 0x52, 0xb1, 0x4c, 0xd0, 0x2c, 0x34, 0xb1, 0xd7,
+	0x4b, 0x63, 0xc7, 0x18, 0xb6, 0x81, 0xbd, 0x5e, 0x12, 0x79, 0x1a, 0xca, 0xae, 0x45, 0xbb, 0x9b,
+	0x38, 0x20, 0xad, 0x7c, 0x9a, 0xa7, 0x1b, 0xd6, 0x3a, 0x76, 0x96, 0xb9, 0xd2, 0x88, 0x50, 0xfa,
+	0x2f, 0x0a, 0x4c, 0x2e, 0x3e, 0xc4, 0x6e, 0xdf, 0xb1, 0x82, 0xff, 0x25, 0xc4, 0x33, 0xdb, 0x42,
+	0x3c, 0x34, 0x2a, 0x44, 0x92, 0x88, 0xf1, 0x2b, 0x98, 0x60, 0xa1, 0xad, 0xd2, 0x00, 0x5b, 0x6e,
+	0x74, 0x23, 0x17, 0xa0, 0xda, 0xdd, 0x1c, 0x78, 0xf7, 0x53, 0x57, 0x32, 0x25, 0x9d, 0xc5, 0x17,
+	0x72, 0x31, 0x04, 0x89, 0x5b, 0x49, 0x5a, 0x5c, 0x2b, 0x94, 0xc7, 0x9a, 0x79, 0x7d, 0x15, 0x0e,
+	0x65, 0x08, 0x78, 0x0d, 0x37, 0xfe, 0x87, 0x02, 0x88, 0x1d, 0xe7, 0xae, 0xe5, 0x0c, 0x30, 0x91,
+	0xa4, 0x1e, 0x05, 0x70, 0x42, 0xa9, 0xe9, 0x59, 0x2e, 0x66, 0x64, 0x56, 0x8c, 0x0a, 0x93, 0xdc,
+	0xb4, 0x5c, 0xbc, 0x03, 0xe7, 0x63, 0xaf, 0xc0, 0x79, 0x7e, 0x4f, 0xce, 0x0b, 0xc7, 0x94, 0x7d,
+	0x70, 0x8e, 0x26, 0xa1, 0xe8, 0xd8, 0xae, 0x4d, 0x5b, 0x45, 0xe6, 0x91, 0x2f, 0xf4, 0xb3, 0x30,
+	0x91, 0x3a, 0x95, 0x60, 0xea, 0x38, 0xd4, 0xf8, 0xb1, 0xb6, 0x98, 0x9c, 0x71, 0x55, 0x31, 0xaa,
+	0x4e, 0x0c, 0xd5, 0x3f, 0x83, 0xe9, 0x84, 0x65, 0xe6, 0x26, 0xf7, 0x61, 0xff, 0xbb, 0x02, 0xe3,
+	0x37, 0x24, 0x51, 0xe4, 0x4d, 0x27, 0x69, 0x74, 0xfa, 0x7c, 0xe2, 0xf4, 0xff, 0x81, 0x46, 0xfd,
+	0x23, 0x91, 0x06, 0x22, 0x6a, 0x71, 0xde, 0x19, 0xa8, 0xc6, 0x69, 0x20, 0x8f, 0x0b, 0x51, 0x1e,
+	0x10, 0xfd, 0x13, 0x68, 0xc5, 0x66, 0x19, 0xb2, 0xf6, 0x34, 0x46, 0xd0, 0xbc, 0x43, 0x70, 0xb0,
+	0x4a, 0x2d, 0x2a, 0x89, 0xd2, 0xbf, 0x1f, 0x83, 0xf1, 0x84, 0x50, 0xb8, 0x3a, 0x29, 0xdb, 0x81,
+	0xed, 0x7b, 0x66, 0x60, 0x51, 0x9e, 0x92, 0x8a, 0x51, 0x8f, 0xa4, 0x86, 0x45, 0x71, 0x98, 0xb5,
+	0xde, 0xc0, 0x35, 0x45, 0x21, 0x84, 0x8c, 0x15, 0x8c, 0x8a, 0x37, 0x70, 0x79, 0xf6, 0x87, 0x97,
+	0x60, 0xf5, 0x6d, 0x33, 0xe3, 0x29, 0xcf, 0x3c, 0x35, 0xad, 0xbe, 0xbd, 0x94, 0x72, 0xd6, 0x86,
+	0x89, 0x60, 0xe0, 0xe0, 0x2c, 0xbc, 0xc0, 0xe0, 0xe3, 0xa1, 0x2a, 0x8d, 0x3f, 0x01, 0x75, 0xab,
+	0x4b, 0xed, 0x2d, 0x2c, 0xf7, 0x2f, 0xb2, 0xfd, 0x6b, 0x5c, 0x28, 0x42, 0x38, 0x01, 0x75, 0xc7,
+	0xb7, 0x7a, 0xb8, 0x67, 0xae, 0x3b, 0x7e, 0xf7, 0x3e, 0x69, 0x95, 0x38, 0x88, 0x0b, 0x17, 0x98,
+	0x4c, 0xff, 0x1a, 0x26, 0x42, 0x0a, 0x96, 0x2e, 0xa5, 0x49, 0x98, 0x82, 0x03, 0x03, 0x82, 0x03,
+	0xd3, 0xee, 0x89, 0x82, 0x2c, 0x85, 0xcb, 0xa5, 0x1e, 0x3a, 0x05, 0x85, 0x9e, 0x45, 0x2d, 0x76,
+	0xe0, 0x6a, 0x67, 0x5a, 0x5e, 0xf5, 0x36, 0x1a, 0x0d, 0x06, 0xd3, 0xaf, 0x00, 0x0a, 0x55, 0x24,
+	0xed, 0xfd, 0x0c, 0x14, 0x49, 0x28, 0x10, 0xef, 0xc7, 0x91, 0xa4, 0x97, 0x4c, 0x24, 0x06, 0x47,
+	0xea, 0x8f, 0x15, 0xd0, 0x96, 0x31, 0x0d, 0xec, 0x2e, 0xb9, 0xec, 0x07, 0xe9, 0xcc, 0x7a, 0xc3,
+	0x79, 0x7f, 0x16, 0x6a, 0x32, 0x75, 0x4d, 0x82, 0xe9, 0xee, 0x0f, 0x74, 0x55, 0x42, 0x57, 0x31,
+	0x8d, 0x2b, 0xa6, 0x90, 0x7c, 0x2f, 0xae, 0xc3, 0xcc, 0x8e, 0x27, 0x11, 0x04, 0xcd, 0x42, 0xc9,
+	0x65, 0x10, 0xc1, 0x50, 0x33, 0x7e, 0x61, 0xb9, 0xa9, 0x21, 0xf4, 0xfa, 0x6d, 0x38, 0xb9, 0x83,
+	0xb3, 0x4c, 0x85, 0xec, 0xdf, 0x65, 0x0b, 0x0e, 0x0b, 0x97, 0xcb, 0x98, 0x5a, 0xe1, 0x35, 0xca,
+	0x82, 0x59, 0x81, 0xa9, 0x6d, 0x1a, 0xe1, 0xfe, 0x43, 0x28, 0xbb, 0x42, 0x26, 0x36, 0x68, 0x65,
+	0x37, 0x88, 0x6c, 0x22, 0xa4, 0xfe, 0x8f, 0x02, 0x07, 0x33, 0x3d, 0x29, 0xbc, 0x98, 0x7b, 0x81,
+	0xef, 0x9a, 0x72, 0x26, 0x8b, 0x73, 0xb0, 0x11, 0xca, 0x97, 0x84, 0x78, 0xa9, 0x97, 0x4c, 0xd2,
+	0xb1, 0x54, 0x92, 0x7a, 0x50, 0x62, 0xa5, 0x2f, 0x9b, 0xe9, 0x44, 0x1c, 0x0a, 0xa3, 0xe8, 0x96,
+	0x65, 0x07, 0x0b, 0xf3, 0x61, 0x7f, 0xfa, 0xf3, 0xd9, 0xcc, 0x2b, 0x8d, 0x73, 0xdc, 0x7e, 0xbe,
+	0x67, 0xf5, 0x29, 0x0e, 0x0c, 0xb1, 0x0b, 0x7a, 0x0f, 0x4a, 0xbc, 0x85, 0xb6, 0x0a, 0x6c, 0xbf,
+	0xba, 0xcc, 0x8d, 0x64, 0x97, 0x15, 0x10, 0xfd, 0x47, 0x05, 0x8a, 0xfc, 0xa4, 0x6f, 0x2a, 0x61,
+	0x55, 0x28, 0x63, 0xaf, 0xeb, 0xf7, 0x6c, 0x6f, 0x83, 0xbd, 0x38, 0x45, 0x23, 0x5a, 0x23, 0x24,
+	0xea, 0x37, 0xcc, 0xc8, 0x9a, 0x28, 0xd2, 0x79, 0xa8, 0xa7, 0x32, 0x27, 0x35, 0x31, 0x29, 0xfb,
+	0x9a, 0x98, 0x4c, 0xa8, 0x25, 0x35, 0xe8, 0x24, 0x14, 0xe8, 0xa3, 0x3e, 0x7f, 0x3a, 0x1b, 0x9d,
+	0x71, 0x69, 0xcd, 0xd4, 0x6b, 0x8f, 0xfa, 0xd8, 0x60, 0xea, 0x30, 0x1a, 0xd6, 0xf4, 0xf9, 0xf5,
+	0xb1, 0xef, 0xb0, 0x68, 0x58, 0xc7, 0x63, 0xa1, 0x57, 0x0c, 0xbe, 0xd0, 0x7f, 0x50, 0xa0, 0x11,
+	0x67, 0xca, 0x65, 0xdb, 0xc1, 0xaf, 0x23, 0x51, 0x54, 0x28, 0xdf, 0xb3, 0x1d, 0xcc, 0x62, 0xe0,
+	0xdb, 0x45, 0xeb, 0x51, 0x4c, 0xbd, 0x7b, 0x0d, 0x2a, 0xd1, 0x11, 0x50, 0x05, 0x8a, 0x8b, 0xb7,
+	0xef, 0xcc, 0xdf, 0x68, 0xe6, 0x50, 0x1d, 0x2a, 0x37, 0x57, 0xd6, 0x4c, 0xbe, 0x54, 0xd0, 0x41,
+	0xa8, 0x1a, 0x8b, 0x57, 0x16, 0xbf, 0x30, 0x97, 0xe7, 0xd7, 0x2e, 0x5e, 0x6d, 0x8e, 0x21, 0x04,
+	0x0d, 0x2e, 0xb8, 0xb9, 0x22, 0x64, 0xf9, 0xce, 0xaf, 0x65, 0x28, 0xcb, 0x18, 0xd1, 0x39, 0x28,
+	0xdc, 0x1a, 0x90, 0x4d, 0x74, 0x38, 0xce, 0xd4, 0xcf, 0x03, 0x9b, 0x62, 0x51, 0x79, 0xea, 0xd4,
+	0x36, 0x39, 0xaf, 0x3b, 0x3d, 0x87, 0x2e, 0x40, 0x29, 0x34, 0xbd, 0xdb, 0x41, 0x71, 0xc5, 0x6d,
+	0x75, 0xd2, 0xe6, 0xd3, 0x23, 0x34, 0x91, 0x83, 0x4b, 0x50, 0x4d, 0x4c, 0x92, 0x68, 0xe4, 0x8f,
+	0x08, 0xf5, 0x48, 0x4a, 0x9a, 0x7e, 0x5b, 0xf4, 0xdc, 0x69, 0x05, 0xad, 0x40, 0x83, 0xa9, 0xe4,
+	0xd8, 0x48, 0xd0, 0x5b, 0xd2, 0x64, 0xd4, 0x28, 0xad, 0x1e, 0xdd, 0x41, 0x1b, 0x85, 0x75, 0x15,
+	0xaa, 0x89, 0xe1, 0x08, 0xa9, 0xa9, 0x0c, 0x4c, 0x4d, 0x90, 0x71, 0x70, 0x23, 0xe6, 0x30, 0x3d,
+	0x87, 0xee, 0x8a, 0x29, 0x29, 0x39, 0x66, 0xed, 0xea, 0xef, 0xf8, 0x08, 0xdd, 0x88, 0x23, 0x2f,
+	0x02, 0xc4, 0x03, 0x09, 0x9a, 0x4e, 0x19, 0x25, 0x27, 0x32, 0x55, 0x1d, 0xa5, 0x8a, 0xc2, 0x5b,
+	0x85, 0x66, 0x76, 0xae, 0xd9, 0xcd, 0xd9, 0xb1, 0xed, 0xaa, 0x11, 0xb1, 0x2d, 0x40, 0x25, 0xea,
+	0xc9, 0x51, 0x62, 0xb4, 0xb3, 0x23, 0x90, 0xba, 0x73, 0x03, 0xd7, 0x73, 0xe8, 0x32, 0xd4, 0xe6,
+	0x1d, 0x67, 0x3f, 0x6e, 0xd4, 0xa4, 0x86, 0x64, 0xfd, 0x38, 0x51, 0xdb, 0xc8, 0xf6, 0x28, 0xf4,
+	0x76, 0xf4, 0x32, 0xec, 0xda, 0xdb, 0xd5, 0x77, 0xf6, 0xc4, 0x45, 0xbb, 0x7d, 0x0b, 0x47, 0x77,
+	0xed, 0x88, 0xfb, 0xde, 0xf3, 0xd4, 0x1e, 0xb8, 0x11, 0xac, 0xaf, 0xc1, 0xc1, 0x4c, 0x83, 0x44,
+	0x5a, 0xc6, 0x4b, 0xa6, 0xa7, 0xaa, 0x33, 0x3b, 0xea, 0xa5, 0xdf, 0x85, 0x4f, 0x9f, 0x3c, 0xd7,
+	0x72, 0x4f, 0x9f, 0x6b, 0xb9, 0x97, 0xcf, 0x35, 0xe5, 0xbb, 0xa1, 0xa6, 0xfc, 0x36, 0xd4, 0x94,
+	0xc7, 0x43, 0x4d, 0x79, 0x32, 0xd4, 0x94, 0xbf, 0x86, 0x9a, 0xf2, 0xf7, 0x50, 0xcb, 0xbd, 0x1c,
+	0x6a, 0xca, 0x4f, 0x2f, 0xb4, 0xdc, 0x93, 0x17, 0x5a, 0xee, 0xe9, 0x0b, 0x2d, 0xf7, 0x65, 0xa9,
+	0xeb, 0xd8, 0xd8, 0xa3, 0xeb, 0x25, 0xf6, 0xdf, 0xc1, 0x07, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
+	0x63, 0x80, 0x72, 0xdc, 0xe5, 0x10, 0x00, 0x00,
 }
 
 func (x MatchType) String() string {
@@ -2779,6 +2782,7 @@ const _ = grpc.SupportPackageIsVersion4
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type IngesterClient interface {
 	Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+	PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
 	QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error)
 	QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error)
 	LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error)
@@ -2809,6 +2813,15 @@ func (c *ingesterClient) Push(ctx context.Context, in *cortexpb.WriteRequest, op
 	return out, nil
 }
 
+func (c *ingesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+	out := new(cortexpbv2.WriteResponse)
+	err := c.cc.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) {
 	stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...)
 	if err != nil {
@@ -3003,6 +3016,7 @@ func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadat
 // IngesterServer is the server API for Ingester service.
 type IngesterServer interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
+	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
 	QueryStream(*QueryRequest, Ingester_QueryStreamServer) error
 	QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error)
 	LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error)
@@ -3023,6 +3037,9 @@ type UnimplementedIngesterServer struct {
 func (*UnimplementedIngesterServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method Push not implemented")
 }
+func (*UnimplementedIngesterServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented")
+}
 func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error {
 	return status.Errorf(codes.Unimplemented, "method QueryStream not implemented")
 }
@@ -3079,6 +3096,24 @@ func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(inter
 	return interceptor(ctx, in, info, handler)
 }
 
+func _Ingester_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(cortexpbv2.WriteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IngesterServer).PushV2(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/cortex.Ingester/PushV2",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IngesterServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error {
 	m := new(QueryRequest)
 	if err := stream.RecvMsg(m); err != nil {
@@ -3297,6 +3332,10 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{
 			MethodName: "Push",
 			Handler:    _Ingester_Push_Handler,
 		},
+		{
+			MethodName: "PushV2",
+			Handler:    _Ingester_PushV2_Handler,
+		},
 		{
 			MethodName: "QueryExemplars",
 			Handler:    _Ingester_QueryExemplars_Handler,
diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto
index 68f343693e..01f7be6f83 100644
--- a/pkg/ingester/client/ingester.proto
+++ b/pkg/ingester/client/ingester.proto
@@ -7,12 +7,15 @@ option go_package = "client";
 
 import "gogoproto/gogo.proto";
 import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto";
+import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service Ingester {
   rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
+  rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {};
+
   rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {};
   rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {};
 
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 00dd1337ce..e8891c6a59 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -44,6 +44,7 @@ import (
 
 	"github.com/cortexproject/cortex/pkg/chunk/encoding"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
 	"github.com/cortexproject/cortex/pkg/querysharding"
 	"github.com/cortexproject/cortex/pkg/ring"
@@ -1056,6 +1057,375 @@ type extendedAppender interface {
 	storage.GetRef
 }
 
+func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+	if err := i.checkRunning(); err != nil {
+		return nil, err
+	}
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "Ingester.PushV2")
+	defer span.Finish()
+
+	// We will report *this* request in the error too.
+	inflight := i.inflightPushRequests.Inc()
+	defer i.inflightPushRequests.Dec()
+
+	gl := i.getInstanceLimits()
+	if gl != nil && gl.MaxInflightPushRequests > 0 {
+		if inflight > gl.MaxInflightPushRequests {
+			return nil, errTooManyInflightPushRequests
+		}
+	}
+
+	var firstPartialErr error
+
+	// NOTE: because we use `unsafe` in deserialisation, we must not
+	// retain anything from `req` past the call to ReuseSlice
+	defer cortexpbv2.ReuseSlice(req.Timeseries)
+
+	userID, err := tenant.TenantID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	il := i.getInstanceLimits()
+	if il != nil && il.MaxIngestionRate > 0 {
+		if rate := i.ingestionRate.Rate(); rate >= il.MaxIngestionRate {
+			return nil, errMaxSamplesPushRateLimitReached
+		}
+	}
+
+	db, err := i.getOrCreateTSDB(userID, false)
+	if err != nil {
+		return nil, wrapWithUser(err, userID)
+	}
+
+	// Ensure the ingester shutdown procedure hasn't started
+	i.stoppedMtx.RLock()
+	if i.stopped {
+		i.stoppedMtx.RUnlock()
+		return nil, errIngesterStopping
+	}
+	i.stoppedMtx.RUnlock()
+
+	if err := db.acquireAppendLock(); err != nil {
+		return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error())
+	}
+	defer db.releaseAppendLock()
+
+	// Keep track of some stats which are tracked only if the samples will be
+	// successfully committed
+	var (
+		succeededSamplesCount       = 0
+		failedSamplesCount          = 0
+		succeededExemplarsCount     = 0
+		succeededMetadataCount      = 0
+		failedMetadataCount         = 0
+		failedExemplarsCount        = 0
+		startAppend                 = time.Now()
+		sampleOutOfBoundsCount      = 0
+		sampleOutOfOrderCount       = 0
+		sampleTooOldCount           = 0
+		newValueForTimestampCount   = 0
+		perUserSeriesLimitCount     = 0
+		perLabelSetSeriesLimitCount = 0
+		perMetricSeriesLimitCount   = 0
+		nativeHistogramCount        = 0
+		succeededHistogramCount     = 0
+
+		updateFirstPartial = func(errFn func() error) {
+			if firstPartialErr == nil {
+				firstPartialErr = errFn()
+			}
+		}
+
+		handleAppendFailure = func(err error, timestampMs int64, lbls []cortexpb.LabelAdapter, copiedLabels labels.Labels) (rollback bool) {
+			// Check if the error is a soft error we can proceed on. If so, we keep track
+			// of it, so that we can return it back to the distributor, which will return a
+			// 400 error to the client. The client (Prometheus) will not retry on 400, and
+			// we actually ingested all samples which haven't failed.
+			switch cause := errors.Cause(err); {
+			case errors.Is(cause, storage.ErrOutOfBounds):
+				sampleOutOfBoundsCount++
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, storage.ErrOutOfOrderSample):
+				sampleOutOfOrderCount++
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, storage.ErrDuplicateSampleForTimestamp):
+				newValueForTimestampCount++
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, storage.ErrTooOldSample):
+				sampleTooOldCount++
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, errMaxSeriesPerUserLimitExceeded):
+				perUserSeriesLimitCount++
+				updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause)) })
+
+			case errors.Is(cause, errMaxSeriesPerMetricLimitExceeded):
+				perMetricSeriesLimitCount++
+				updateFirstPartial(func() error {
+					return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause))
+				})
+
+			case errors.As(cause, &errMaxSeriesPerLabelSetLimitExceeded{}):
+				perLabelSetSeriesLimitCount++
+				updateFirstPartial(func() error {
+					return makeMetricLimitError(perLabelsetSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause))
+				})
+
+			case errors.Is(cause, histogram.ErrHistogramSpanNegativeOffset):
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, histogram.ErrHistogramSpansBucketsMismatch):
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, histogram.ErrHistogramNegativeBucketCount):
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, histogram.ErrHistogramCountNotBigEnough):
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			case errors.Is(cause, histogram.ErrHistogramCountMismatch):
+				updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })
+
+			default:
+				rollback = true
+			}
+			return
+		}
+	)
+
+	// Walk the samples, appending them to the users database
+	app := db.Appender(ctx).(extendedAppender)
+	b := labels.NewScratchBuilder(0)
+	for _, ts := range req.Timeseries {
+		tsLabels := ts.ToLabels(&b, req.Symbols)
+		seriesLabels := cortexpb.FromLabelsToLabelAdapters(tsLabels)
+
+		// The labels must be sorted (in our case, it's guaranteed a write request
+		// has sorted labels once hit the ingester).
+
+		// Look up a reference for this series.
+		tsLabelsHash := tsLabels.Hash()
+		ref, copiedLabels := app.GetRef(tsLabels, tsLabelsHash)
+
+		// To find out if any sample was added to this series, we keep old value.
+		oldSucceededSamplesCount := succeededSamplesCount
+
+		for _, s := range ts.Samples {
+			var err error
+
+			// If the cached reference exists, we try to use it.
+			if ref != 0 {
+				if _, err = app.Append(ref, copiedLabels, s.Timestamp, s.Value); err == nil {
+					succeededSamplesCount++
+					continue
+				}
+
+			} else {
+				// Copy the label set because both TSDB and the active series tracker may retain it.
+				copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels)
+
+				// Retain the reference in case there are multiple samples for the series.
+				if ref, err = app.Append(0, copiedLabels, s.Timestamp, s.Value); err == nil {
+					succeededSamplesCount++
+					continue
+				}
+			}
+
+			failedSamplesCount++
+
+			if rollback := handleAppendFailure(err, s.Timestamp, seriesLabels, copiedLabels); !rollback {
+				continue
+			}
+			// The error looks an issue on our side, so we should rollback
+			if rollbackErr := app.Rollback(); rollbackErr != nil {
+				level.Warn(logutil.WithContext(ctx, i.logger)).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr)
+			}
+
+			return nil, wrapWithUser(err, userID)
+		}
+
+		if i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms {
+			for _, hp := range ts.Histograms {
+				var (
+					err error
+					h   *histogram.Histogram
+					fh  *histogram.FloatHistogram
+				)
+
+				if hp.GetCountFloat() > 0 {
+					fh = cortexpbv2.FloatHistogramProtoToFloatHistogram(hp)
+				} else {
+					h = cortexpbv2.HistogramProtoToHistogram(hp)
+				}
+
+				if ref != 0 {
+					if _, err = app.AppendHistogram(ref, copiedLabels, hp.Timestamp, h, fh); err == nil {
+						succeededHistogramCount++
+						succeededSamplesCount++
+						continue
+					}
+				} else {
+					// Copy the label set because both TSDB and the active series tracker may retain it.
+					copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels)
+					if ref, err = app.AppendHistogram(0, copiedLabels, hp.Timestamp, h, fh); err == nil {
+						succeededHistogramCount++
+						succeededSamplesCount++
+						continue
+					}
+				}
+
+				failedSamplesCount++
+
+				if rollback := handleAppendFailure(err, hp.Timestamp, seriesLabels, copiedLabels); !rollback {
+					continue
+				}
+				// The error looks an issue on our side, so we should rollback
+				if rollbackErr := app.Rollback(); rollbackErr != nil {
+					level.Warn(logutil.WithContext(ctx, i.logger)).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr)
+				}
+				return nil, wrapWithUser(err, userID)
+			}
+		} else {
+			nativeHistogramCount += len(ts.Histograms)
+		}
+
+		if i.cfg.ActiveSeriesMetricsEnabled && succeededSamplesCount > oldSucceededSamplesCount {
+			db.activeSeries.UpdateSeries(tsLabels, tsLabelsHash, startAppend, func(l labels.Labels) labels.Labels {
+				// we must already have copied the labels if succeededSamplesCount has been incremented.
+				return copiedLabels
+			})
+		}
+
+		maxExemplarsForUser := i.getMaxExemplars(userID)
+		if maxExemplarsForUser > 0 {
+			// app.AppendExemplar currently doesn't create the series, it must
+			// already exist.  If it does not then drop.
+			if ref == 0 && len(ts.Exemplars) > 0 {
+				updateFirstPartial(func() error {
+					return wrappedTSDBIngestExemplarErr(errExemplarRef,
+						model.Time(ts.Exemplars[0].Timestamp), seriesLabels, cortexpb.FromLabelsToLabelAdapters(ts.Exemplars[0].ToLabels(&b, req.Symbols)))
+				})
+				failedExemplarsCount += len(ts.Exemplars)
+			} else { // Note that else is explicit, rather than a continue in the above if, in case of additional logic post exemplar processing.
+				for _, ex := range ts.Exemplars {
+					exLabels := ex.ToLabels(&b, req.Symbols)
+					e := exemplar.Exemplar{
+						Value:  ex.Value,
+						Ts:     ex.Timestamp,
+						HasTs:  true,
+						Labels: exLabels,
+					}
+
+					if _, err = app.AppendExemplar(ref, nil, e); err == nil {
+						succeededExemplarsCount++
+						continue
+					}
+
+					// Error adding exemplar
+					updateFirstPartial(func() error {
+						return wrappedTSDBIngestExemplarErr(err, model.Time(ex.Timestamp), seriesLabels, cortexpb.FromLabelsToLabelAdapters(exLabels))
+					})
+					failedExemplarsCount++
+				}
+			}
+		}
+
+		if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+			metaData := ts.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols)
+			if err := i.appendMetadata(userID, metaData); err == nil {
+				succeededMetadataCount++
+			} else {
+				level.Warn(i.logger).Log("msg", "failed to ingest metadata", "err", err)
+				failedMetadataCount++
+			}
+		}
+	}
+	// At this point all samples have been added to the appender, so we can track the time it took.
+	i.TSDBState.appenderAddDuration.Observe(time.Since(startAppend).Seconds())
+
+	startCommit := time.Now()
+	if err := app.Commit(); err != nil {
+		return nil, wrapWithUser(err, userID)
+	}
+	i.TSDBState.appenderCommitDuration.Observe(time.Since(startCommit).Seconds())
+
+	// If only invalid samples are pushed, don't change "last update", as TSDB was not modified.
+	if succeededSamplesCount > 0 {
+		db.setLastUpdate(time.Now())
+	}
+
+	// Increment metrics only if the samples have been successfully committed.
+	// If the code didn't reach this point, it means that we returned an error
+	// which will be converted into an HTTP 5xx and the client should/will retry.
+	i.metrics.ingestedMetadata.Add(float64(succeededMetadataCount))
+	i.metrics.ingestedMetadataFail.Add(float64(failedMetadataCount))
+	i.metrics.ingestedSamples.Add(float64(succeededSamplesCount))
+	i.metrics.ingestedSamplesFail.Add(float64(failedSamplesCount))
+	i.metrics.ingestedExemplars.Add(float64(succeededExemplarsCount))
+	i.metrics.ingestedExemplarsFail.Add(float64(failedExemplarsCount))
+
+	if sampleOutOfBoundsCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Add(float64(sampleOutOfBoundsCount))
+	}
+	if sampleOutOfOrderCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount))
+	}
+	if sampleTooOldCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(sampleTooOld, userID).Add(float64(sampleTooOldCount))
+	}
+	if newValueForTimestampCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount))
+	}
+	if perUserSeriesLimitCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(perUserSeriesLimit, userID).Add(float64(perUserSeriesLimitCount))
+	}
+	if perMetricSeriesLimitCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(perMetricSeriesLimit, userID).Add(float64(perMetricSeriesLimitCount))
+	}
+	if perLabelSetSeriesLimitCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(perLabelsetSeriesLimit, userID).Add(float64(perLabelSetSeriesLimitCount))
+	}
+
+	if !i.cfg.BlocksStorageConfig.TSDB.EnableNativeHistograms && nativeHistogramCount > 0 {
+		i.validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramSample, userID).Add(float64(nativeHistogramCount))
+	}
+
+	// Distributor counts both samples, metadata and histograms, so for consistency ingester does the same.
+	i.ingestionRate.Add(int64(succeededSamplesCount + succeededMetadataCount))
+
+	switch req.Source {
+	case cortexpbv2.RULE:
+		db.ingestedRuleSamples.Add(int64(succeededSamplesCount))
+	case cortexpbv2.API:
+		fallthrough
+	default:
+		db.ingestedAPISamples.Add(int64(succeededSamplesCount))
+	}
+
+	if firstPartialErr != nil {
+		code := http.StatusBadRequest
+		var ve *validationError
+		if errors.As(firstPartialErr, &ve) {
+			code = ve.code
+		}
+		level.Debug(logutil.WithContext(ctx, i.logger)).Log("msg", "partial failures to push", "totalSamples", succeededSamplesCount+failedSamplesCount, "failedSamples", failedSamplesCount, "firstPartialErr", firstPartialErr)
+		return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error())
+	}
+
+	writeResponse := &cortexpbv2.WriteResponse{
+		Samples:    int64(succeededSamplesCount),
+		Histograms: int64(succeededHistogramCount),
+		Exemplars:  int64(succeededExemplarsCount),
+	}
+
+	return writeResponse, nil
+}
+
 // Push adds metrics to a block
 func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	if err := i.checkRunning(); err != nil {
diff --git a/pkg/ingester/ingester_prw2_test.go b/pkg/ingester/ingester_prw2_test.go
new file mode 100644
index 0000000000..ac132527fb
--- /dev/null
+++ b/pkg/ingester/ingester_prw2_test.go
@@ -0,0 +1,4468 @@
+package ingester
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/thanos-io/objstore"
+	"github.com/thanos-io/thanos/pkg/shipper"
+	"github.com/weaveworks/common/httpgrpc"
+	"github.com/weaveworks/common/middleware"
+	"github.com/weaveworks/common/user"
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/grpc"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/chunk/encoding"
+	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring"
+	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/test"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+)
+
+func TestIngesterPRW2_Push(t *testing.T) {
+	metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
+	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
+	metricNames := []string{
+		"cortex_ingester_ingested_samples_total",
+		"cortex_ingester_ingested_samples_failures_total",
+		"cortex_ingester_memory_series",
+		"cortex_ingester_memory_users",
+		"cortex_ingester_memory_series_created_total",
+		"cortex_ingester_memory_series_removed_total",
+	}
+	userID := "test"
+
+	testHistogramV2 := cortexpbv2.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1))
+	testHistogram := cortexpb.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1))
+	testFloatHistogramV2 := cortexpbv2.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1))
+	testFloatHistogram := cortexpb.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1))
+	tests := map[string]struct {
+		reqs                      []*cortexpbv2.WriteRequest
+		expectedErr               error
+		expectedIngested          []cortexpb.TimeSeries
+		expectedMetadataIngested  []*cortexpb.MetricMetadata
+		expectedExemplarsIngested []cortexpb.TimeSeries
+		expectedMetrics           string
+		additionalMetrics         []string
+		disableActiveSeries       bool
+		maxExemplars              int
+		oooTimeWindow             time.Duration
+		disableNativeHistogram    bool
+	}{
+		"should record native histogram discarded": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					[]cortexpbv2.Histogram{{Timestamp: 10}},
+					[]cortexpbv2.Metadata{{Type: cortexpbv2.METRIC_TYPE_GAUGE, HelpRef: 3}},
+					cortexpbv2.API,
+					"a help for metric_name_2"),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 10}}},
+			},
+			expectedMetadataIngested: []*cortexpb.MetricMetadata{
+				{MetricFamilyName: "test", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE},
+			},
+			additionalMetrics:      []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"},
+			disableNativeHistogram: true,
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="native-histogram-sample",user="test"} 1
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should succeed on valid series and metadata": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					nil,
+					[]cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_COUNTER}},
+					cortexpbv2.API,
+					"a help for metric_name_1"),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					nil,
+					[]cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_GAUGE}},
+					cortexpbv2.API,
+					"a help for metric_name_2"),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 9}, {Value: 2, TimestampMs: 10}}},
+			},
+			expectedMetadataIngested: []*cortexpb.MetricMetadata{
+				{MetricFamilyName: "test", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE},
+				{MetricFamilyName: "test", Help: "a help for metric_name_1", Unit: "", Type: cortexpb.COUNTER},
+			},
+			additionalMetrics: []string{
+				// Metadata.
+				"cortex_ingester_memory_metadata",
+				"cortex_ingester_memory_metadata_created_total",
+				"cortex_ingester_ingested_metadata_total",
+				"cortex_ingester_ingested_metadata_failures_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_metadata_failures_total The total number of metadata that errored on ingestion.
+				# TYPE cortex_ingester_ingested_metadata_failures_total counter
+				cortex_ingester_ingested_metadata_failures_total 0
+				# HELP cortex_ingester_ingested_metadata_total The total number of metadata ingested.
+				# TYPE cortex_ingester_ingested_metadata_total counter
+				cortex_ingester_ingested_metadata_total 2
+				# HELP cortex_ingester_memory_metadata The current number of metadata in memory.
+				# TYPE cortex_ingester_memory_metadata gauge
+				cortex_ingester_memory_metadata 2
+				# HELP cortex_ingester_memory_metadata_created_total The total number of metadata that were created per user
+				# TYPE cortex_ingester_memory_metadata_created_total counter
+				cortex_ingester_memory_metadata_created_total{user="test"} 2
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 2
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should succeed on valid series with exemplars": {
+			maxExemplars: 2,
+			reqs: []*cortexpbv2.WriteRequest{
+				// Ingesting an exemplar requires a sample to create the series first
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				{
+					Symbols: []string{"", "__name__", "test", "traceID", "123", "456"},
+					Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+						{
+							TimeSeries: &cortexpbv2.TimeSeries{
+								LabelsRefs: []uint32{1, 2},
+								Exemplars: []cortexpbv2.Exemplar{
+									{
+										LabelsRefs: []uint32{3, 4},
+										Timestamp:  1000,
+										Value:      1000,
+									},
+									{
+										LabelsRefs: []uint32{3, 5},
+										Timestamp:  1001,
+										Value:      1001,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 9}}},
+			},
+			expectedExemplarsIngested: []cortexpb.TimeSeries{
+				{
+					Labels: metricLabelAdapters,
+					Exemplars: []cortexpb.Exemplar{
+						{
+							Labels:      []cortexpb.LabelAdapter{{Name: "traceID", Value: "123"}},
+							TimestampMs: 1000,
+							Value:       1000,
+						},
+						{
+							Labels:      []cortexpb.LabelAdapter{{Name: "traceID", Value: "456"}},
+							TimestampMs: 1001,
+							Value:       1001,
+						},
+					},
+				},
+			},
+			expectedMetadataIngested: nil,
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_exemplar_exemplars_appended_total",
+				"cortex_ingester_tsdb_exemplar_exemplars_in_storage",
+				"cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage",
+				"cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds",
+				"cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+
+				# HELP cortex_ingester_tsdb_exemplar_exemplars_appended_total Total number of TSDB exemplars appended.
+				# TYPE cortex_ingester_tsdb_exemplar_exemplars_appended_total counter
+				cortex_ingester_tsdb_exemplar_exemplars_appended_total 2
+
+				# HELP cortex_ingester_tsdb_exemplar_exemplars_in_storage Number of TSDB exemplars currently in storage.
+				# TYPE cortex_ingester_tsdb_exemplar_exemplars_in_storage gauge
+				cortex_ingester_tsdb_exemplar_exemplars_in_storage 2
+
+				# HELP cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage Number of TSDB series with exemplars currently in storage.
+				# TYPE cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage gauge
+				cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage{user="test"} 1
+
+				# HELP cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds The timestamp of the oldest exemplar stored in circular storage. Useful to check for what time range the current exemplar buffer limit allows. This usually means the last timestamp for all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series.
+				# TYPE cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds gauge
+				cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="test"} 1
+
+				# HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts.
+				# TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter
+				cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 0
+			`,
+		},
+		"successful push, active series disabled": {
+			disableActiveSeries: true,
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					nil,
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 9}, {Value: 2, TimestampMs: 10}}},
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 2
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+			`,
+		},
+		"ooo disabled, should soft fail on sample out of order": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					[]cortexpbv2.Histogram{
+						cortexpbv2.HistogramToHistogramProto(9, tsdbutil.GenerateTestHistogram(1)),
+					},
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfOrderSample, model.Time(9), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 10}}},
+			},
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_out_of_order_samples_total",
+				"cortex_ingester_tsdb_head_out_of_order_samples_appended_total",
+				"cortex_discarded_samples_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 2
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+        	    # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples.
+        	    # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0
+        	    # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled.
+        	    # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter
+        	    cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="test"} 1
+        	    cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="test"} 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="sample-out-of-order",user="test"} 2
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"ooo disabled, should soft fail on sample out of bound": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (86400 * 1000)}},
+					[]cortexpbv2.Histogram{
+						cortexpbv2.HistogramToHistogramProto(1575043969-(86400*1000), tsdbutil.GenerateTestHistogram(1)),
+					},
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfBounds, model.Time(1575043969-(86400*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}},
+			},
+			additionalMetrics: []string{"cortex_ingester_active_series"},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 2
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="sample-out-of-bounds",user="test"} 2
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"ooo enabled, should soft fail on sample too old": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (600 * 1000)}},
+					nil,
+					nil,
+					cortexpbv2.API),
+			},
+			oooTimeWindow: 5 * time.Minute,
+			expectedErr:   httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrTooOldSample, model.Time(1575043969-(600*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}},
+			},
+			additionalMetrics: []string{
+				"cortex_discarded_samples_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 1
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="sample-too-old",user="test"} 1
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"ooo enabled, should succeed": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (60 * 1000)}},
+					nil,
+					nil,
+					cortexpbv2.API),
+			},
+			oooTimeWindow: 5 * time.Minute,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}, {Value: 2, TimestampMs: 1575043969}}},
+			},
+			additionalMetrics: []string{"cortex_ingester_active_series"},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 2
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should soft fail on two different sample values at the same timestamp": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					nil,
+					nil,
+					cortexpbv2.API),
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969}},
+					nil,
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.NewDuplicateFloatErr(1575043969, 2, 1), model.Time(1575043969), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}},
+			},
+			additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 1
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="new-value-for-timestamp",user="test"} 1
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should soft fail on exemplar with unknown series": {
+			maxExemplars: 1,
+			reqs: []*cortexpbv2.WriteRequest{
+				// Ingesting an exemplar requires a sample to create the series first
+				// This is not done here.
+				{
+					Symbols: []string{"", "__name__", "test", "traceID", "123"},
+					Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+						{
+							TimeSeries: &cortexpbv2.TimeSeries{
+								LabelsRefs: []uint32{1, 2},
+								Exemplars: []cortexpbv2.Exemplar{
+									{
+										LabelsRefs: []uint32{3, 4},
+										Timestamp:  1000,
+										Value:      1000,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			expectedErr:              httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestExemplarErr(errExemplarRef, model.Time(1000), cortexpb.FromLabelsToLabelAdapters(metricLabels), []cortexpb.LabelAdapter{{Name: "traceID", Value: "123"}}), userID).Error()),
+			expectedIngested:         nil,
+			expectedMetadataIngested: nil,
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_exemplar_exemplars_appended_total",
+				"cortex_ingester_tsdb_exemplar_exemplars_in_storage",
+				"cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage",
+				"cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds",
+				"cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 0
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 0
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 0
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 0
+
+				# HELP cortex_ingester_tsdb_exemplar_exemplars_appended_total Total number of TSDB exemplars appended.
+				# TYPE cortex_ingester_tsdb_exemplar_exemplars_appended_total counter
+				cortex_ingester_tsdb_exemplar_exemplars_appended_total 0
+
+				# HELP cortex_ingester_tsdb_exemplar_exemplars_in_storage Number of TSDB exemplars currently in storage.
+				# TYPE cortex_ingester_tsdb_exemplar_exemplars_in_storage gauge
+				cortex_ingester_tsdb_exemplar_exemplars_in_storage 0
+
+				# HELP cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage Number of TSDB series with exemplars currently in storage.
+				# TYPE cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage gauge
+				cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage{user="test"} 0
+
+				# HELP cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds The timestamp of the oldest exemplar stored in circular storage. Useful to check for what time range the current exemplar buffer limit allows. This usually means the last timestamp for all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series.
+				# TYPE cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds gauge
+				cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="test"} 0
+
+				# HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts.
+				# TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter
+				cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 0
+			`,
+		},
+		"should succeed when only native histogram present if enabled": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					nil,
+					[]cortexpbv2.Histogram{testHistogramV2},
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Histograms: []cortexpb.Histogram{testHistogram}},
+			},
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_head_samples_appended_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+        	    # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples.
+        	    # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0
+        	    # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples.
+        	    # TYPE cortex_ingester_tsdb_head_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 0
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should succeed when only float native histogram present if enabled": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					nil,
+					[]cortexpbv2.Histogram{testFloatHistogramV2},
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Histograms: []cortexpb.Histogram{testFloatHistogram}},
+			},
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_head_samples_appended_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 1
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+        	    # HELP cortex_ingester_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples.
+        	    # TYPE cortex_ingester_tsdb_head_out_of_order_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_out_of_order_samples_appended_total{type="float",user="test"} 0
+        	    # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples.
+        	    # TYPE cortex_ingester_tsdb_head_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 0
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+		"should fail to ingest histogram due to OOO native histogram. Sample and histogram has same timestamp but sample got ingested first": {
+			reqs: []*cortexpbv2.WriteRequest{
+				cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{metricLabels},
+					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					[]cortexpbv2.Histogram{testHistogramV2},
+					nil,
+					cortexpbv2.API),
+			},
+			expectedErr: nil,
+			expectedIngested: []cortexpb.TimeSeries{
+				{Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 10}}},
+			},
+			additionalMetrics: []string{
+				"cortex_ingester_tsdb_head_samples_appended_total",
+				"cortex_ingester_tsdb_out_of_order_samples_total",
+				"cortex_ingester_active_series",
+			},
+			expectedMetrics: `
+				# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+				# TYPE cortex_ingester_ingested_samples_total counter
+				cortex_ingester_ingested_samples_total 2
+				# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+				# TYPE cortex_ingester_ingested_samples_failures_total counter
+				cortex_ingester_ingested_samples_failures_total 0
+				# HELP cortex_ingester_memory_users The current number of users in memory.
+				# TYPE cortex_ingester_memory_users gauge
+				cortex_ingester_memory_users 1
+        	    # HELP cortex_ingester_tsdb_head_samples_appended_total Total number of appended samples.
+        	    # TYPE cortex_ingester_tsdb_head_samples_appended_total counter
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="float",user="test"} 1
+        	    cortex_ingester_tsdb_head_samples_appended_total{type="histogram",user="test"} 0
+        	    # HELP cortex_ingester_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled.
+        	    # TYPE cortex_ingester_tsdb_out_of_order_samples_total counter
+        	    cortex_ingester_tsdb_out_of_order_samples_total{type="float",user="test"} 0
+        	    cortex_ingester_tsdb_out_of_order_samples_total{type="histogram",user="test"} 1
+				# HELP cortex_ingester_memory_series The current number of series in memory.
+				# TYPE cortex_ingester_memory_series gauge
+				cortex_ingester_memory_series 1
+				# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+				# TYPE cortex_ingester_memory_series_created_total counter
+				cortex_ingester_memory_series_created_total{user="test"} 1
+				# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+				# TYPE cortex_ingester_memory_series_removed_total counter
+				cortex_ingester_memory_series_removed_total{user="test"} 0
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				# HELP cortex_ingester_active_series Number of currently active series per user.
+				# TYPE cortex_ingester_active_series gauge
+				cortex_ingester_active_series{user="test"} 1
+			`,
+		},
+	}
+
+	for testName, testData := range tests {
+		t.Run(testName, func(t *testing.T) {
+			registry := prometheus.NewRegistry()
+
+			// Create a mocked ingester
+			cfg := defaultIngesterTestConfig(t)
+			cfg.LifecyclerConfig.JoinAfter = 0
+			cfg.ActiveSeriesMetricsEnabled = !testData.disableActiveSeries
+
+			limits := defaultLimitsTestConfig()
+			limits.MaxExemplars = testData.maxExemplars
+			limits.OutOfOrderTimeWindow = model.Duration(testData.oooTimeWindow)
+			i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, !testData.disableNativeHistogram)
+			require.NoError(t, err)
+			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+			ctx := user.InjectOrgID(context.Background(), userID)
+
+			// Wait until the ingester is ACTIVE
+			test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+				return i.lifecycler.GetState()
+			})
+
+			// Push timeseries
+			for idx, req := range testData.reqs {
+				_, err := i.PushV2(ctx, req)
+
+				// We expect no error on any request except the last one
+				// which may error (and in that case we assert on it)
+				if idx < len(testData.reqs)-1 {
+					assert.NoError(t, err)
+				} else {
+					assert.Equal(t, testData.expectedErr, err)
+				}
+			}
+
+			// Read back samples to see what has been really ingested
+			s := &mockQueryStreamServer{ctx: ctx}
+			err = i.QueryStream(&client.QueryRequest{
+				StartTimestampMs: math.MinInt64,
+				EndTimestampMs:   math.MaxInt64,
+				Matchers:         []*client.LabelMatcher{{Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}},
+			}, s)
+			require.NoError(t, err)
+			set, err := seriesSetFromResponseStream(s)
+			require.NoError(t, err)
+
+			require.NotNil(t, set)
+			r, err := client.SeriesSetToQueryResponse(set)
+			require.NoError(t, err)
+			assert.Equal(t, testData.expectedIngested, r.Timeseries)
+
+			// Read back exemplars to see what has been really ingested
+			exemplarRes, err := i.QueryExemplars(ctx, &client.ExemplarQueryRequest{
+				StartTimestampMs: math.MinInt64,
+				EndTimestampMs:   math.MaxInt64,
+				Matchers: []*client.LabelMatchers{
+					{Matchers: []*client.LabelMatcher{{Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}}},
+				},
+			})
+
+			require.NoError(t, err)
+			require.NotNil(t, exemplarRes)
+			assert.Equal(t, testData.expectedExemplarsIngested, exemplarRes.Timeseries)
+
+			// Read back metadata to see what has been really ingested.
+			mres, err := i.MetricsMetadata(ctx, &client.MetricsMetadataRequest{})
+
+			require.NoError(t, err)
+			require.NotNil(t, mres)
+
+			// Order is never guaranteed.
+			assert.ElementsMatch(t, testData.expectedMetadataIngested, mres.Metadata)
+
+			// Update active series for metrics check.
+			if !testData.disableActiveSeries {
+				i.updateActiveSeries(ctx)
+			}
+
+			// Append additional metrics to assert on.
+			mn := append(metricNames, testData.additionalMetrics...)
+
+			// Check tracked Prometheus metrics
+			err = testutil.GatherAndCompare(registry, strings.NewReader(testData.expectedMetrics), mn...)
+			assert.NoError(t, err)
+		})
+	}
+}
+
+// TestIngesterPRW2_MetricLimitExceeded verifies that the per-metric series
+// limit and per-metric metadata limit are enforced on PushV2 (remote write
+// v2) requests, and that the limits still hold after the ingester is
+// restarted on top of the same blocks directory.
+func TestIngesterPRW2_MetricLimitExceeded(t *testing.T) {
+	limits := defaultLimitsTestConfig()
+	limits.MaxLocalSeriesPerMetric = 1
+	limits.MaxLocalMetadataPerMetric = 1
+
+	dir := t.TempDir()
+
+	blocksDir := filepath.Join(dir, "blocks")
+	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
+
+	// blocksIngesterGenerator starts a blocks-storage ingester on blocksDir and
+	// returns it once it has become ACTIVE in the ring.
+	blocksIngesterGenerator := func() *Ingester {
+		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, prometheus.NewRegistry(), true)
+		require.NoError(t, err)
+		require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
+		// Wait until it's ACTIVE
+		test.Poll(t, time.Second, ring.ACTIVE, func() interface{} {
+			return ing.lifecycler.GetState()
+		})
+
+		return ing
+	}
+
+	// Pair each storage engine name with its generator explicitly so the
+	// subtest label can never drift out of sync with the generator being run
+	// (the previous parallel-slice form labelled the blocks generator "chunks").
+	ingesterGenerators := map[string]func() *Ingester{
+		"blocks": blocksIngesterGenerator,
+	}
+	for name, ingGenerator := range ingesterGenerators {
+		t.Run(name, func(t *testing.T) {
+			ing := ingGenerator()
+
+			userID := "1"
+			labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}
+			sample1 := cortexpbv2.Sample{
+				Timestamp: 0,
+				Value:     1,
+			}
+			sample2 := cortexpbv2.Sample{
+				Timestamp: 1,
+				Value:     2,
+			}
+			labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}}
+			sample3 := cortexpbv2.Sample{
+				Timestamp: 1,
+				Value:     3,
+			}
+
+			// Append only one series and one metadata first, expect no error.
+			ctx := user.InjectOrgID(context.Background(), userID)
+			_, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric"))
+			require.NoError(t, err)
+
+			// testLimits pushes past the per-metric limits and asserts that the
+			// series limit hard-fails while the metadata limit silently drops.
+			testLimits := func() {
+				// Append two series, expect series-exceeded error.
+				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API))
+				httpResp, ok := httpgrpc.HTTPResponseFromError(err)
+				require.True(t, ok, "returned error is not an httpgrpc response")
+				assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
+				assert.Equal(t, wrapWithUser(makeMetricLimitError(perMetricSeriesLimit, labels3, ing.limiter.FormatError(userID, errMaxSeriesPerMetricLimitExceeded)), userID).Error(), string(httpResp.Body))
+
+				// Append two metadata for the same metric. Drop the second one, and expect no error since metadata is a best effort approach.
+				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 6, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2"))
+				require.NoError(t, err)
+
+				// Read samples back via ingester queries.
+				res, _, err := runTestQuery(ctx, t, ing, labels.MatchEqual, model.MetricNameLabel, "testmetric")
+				require.NoError(t, err)
+
+				// Verify Series: only the first series (labels1) was admitted, with
+				// both of its samples; the second series (labels3) was rejected.
+				expected := model.Matrix{
+					{
+						Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)),
+						Values: []model.SamplePair{
+							{
+								Timestamp: model.Time(sample1.Timestamp),
+								Value:     model.SampleValue(sample1.Value),
+							},
+							{
+								Timestamp: model.Time(sample2.Timestamp),
+								Value:     model.SampleValue(sample2.Value),
+							},
+						},
+					},
+				}
+
+				assert.Equal(t, expected, res)
+
+				// Verify metadata: only the first metadata entry survived the limit.
+				m, err := ing.MetricsMetadata(ctx, nil)
+				require.NoError(t, err)
+				resultMetadata := &cortexpb.MetricMetadata{MetricFamilyName: "testmetric", Help: "a help for testmetric", Type: cortexpb.COUNTER}
+				assert.Equal(t, []*cortexpb.MetricMetadata{resultMetadata}, m.Metadata)
+			}
+
+			testLimits()
+
+			// Limits should hold after restart.
+			services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+			ing = ingGenerator()
+			defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+
+			testLimits()
+		})
+	}
+}
+
+// TestIngesterPRW2_UserLimitExceeded verifies the remote-write v2 push path
+// (PushV2) against per-user limits: with max series and max metadata per user
+// both set to 1, a second series must be rejected with a 400 httpgrpc error,
+// while excess metadata is silently dropped (metadata is best effort). It also
+// checks that the already-accepted series remains queryable and that the same
+// limit behavior holds after the ingester is restarted on the same blocks dir.
+func TestIngesterPRW2_UserLimitExceeded(t *testing.T) {
+	limits := defaultLimitsTestConfig()
+	limits.MaxLocalSeriesPerUser = 1
+	limits.MaxLocalMetricsWithMetadataPerUser = 1
+
+	dir := t.TempDir()
+
+	// chunksDir is created for parity with older chunks-storage tests; only
+	// blocksDir is actually used by the generator below.
+	chunksDir := filepath.Join(dir, "chunks")
+	blocksDir := filepath.Join(dir, "blocks")
+	require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
+	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
+
+	// blocksIngesterGenerator starts a fresh ingester over blocksDir and waits
+	// for it to become ACTIVE in the ring; reusing blocksDir across calls is
+	// what lets the restart below see the previously written state.
+	blocksIngesterGenerator := func() *Ingester {
+		ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, nil, blocksDir, prometheus.NewRegistry(), true)
+		require.NoError(t, err)
+		require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
+		// Wait until it's ACTIVE
+		test.Poll(t, time.Second, ring.ACTIVE, func() interface{} {
+			return ing.lifecycler.GetState()
+		})
+
+		return ing
+	}
+
+	tests := []string{"blocks"}
+	for i, ingGenerator := range []func() *Ingester{blocksIngesterGenerator} {
+		t.Run(tests[i], func(t *testing.T) {
+			ing := ingGenerator()
+
+			userID := "1"
+			// Series
+			labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}
+			sample1 := cortexpbv2.Sample{
+				Timestamp: 0,
+				Value:     1,
+			}
+			sample2 := cortexpbv2.Sample{
+				Timestamp: 1,
+				Value:     2,
+			}
+			labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric2"}, {Name: "foo", Value: "biz"}}
+			sample3 := cortexpbv2.Sample{
+				Timestamp: 1,
+				Value:     3,
+			}
+
+			// Append only one series and one metadata first, expect no error.
+			ctx := user.InjectOrgID(context.Background(), userID)
+			_, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric"))
+			require.NoError(t, err)
+
+			testLimits := func() {
+				// Append to two series, expect series-exceeded error.
+				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API))
+				httpResp, ok := httpgrpc.HTTPResponseFromError(err)
+				require.True(t, ok, "returned error is not an httpgrpc response")
+				assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
+				assert.Equal(t, wrapWithUser(makeLimitError(perUserSeriesLimit, ing.limiter.FormatError(userID, errMaxSeriesPerUserLimitExceeded)), userID).Error(), string(httpResp.Body))
+
+				// Append two metadata, expect no error since metadata is a best effort approach.
+				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 8, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2"))
+				require.NoError(t, err)
+
+				// Read samples back via ingester queries.
+				res, _, err := runTestQuery(ctx, t, ing, labels.MatchEqual, model.MetricNameLabel, "testmetric")
+				require.NoError(t, err)
+
+				// Only labels1 was admitted, so the matrix holds a single series
+				// with both of its accepted samples (sample1 from the initial
+				// push, sample2 appended to the existing series above).
+				expected := model.Matrix{
+					{
+						Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)),
+						Values: []model.SamplePair{
+							{
+								Timestamp: model.Time(sample1.Timestamp),
+								Value:     model.SampleValue(sample1.Value),
+							},
+							{
+								Timestamp: model.Time(sample2.Timestamp),
+								Value:     model.SampleValue(sample2.Value),
+							},
+						},
+					},
+				}
+
+				// Verify samples
+				require.Equal(t, expected, res)
+
+				// Verify metadata: only the first metric's metadata survived the
+				// per-user metadata limit of 1.
+				m, err := ing.MetricsMetadata(ctx, nil)
+				require.NoError(t, err)
+				resultMetadata := &cortexpb.MetricMetadata{MetricFamilyName: "testmetric", Help: "a help for testmetric", Type: cortexpb.COUNTER}
+				assert.Equal(t, []*cortexpb.MetricMetadata{resultMetadata}, m.Metadata)
+			}
+
+			testLimits()
+
+			// Limits should hold after restart.
+			services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+			ing = ingGenerator()
+			defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+
+			testLimits()
+		})
+	}
+
+}
+
+// TestIngesterPRW2_PerLabelsetLimitExceeded exercises per-labelset series
+// limits on the remote-write v2 push path (PushV2). It walks one scenario
+// end to end:
+//  1. fill two labelset limits exactly and check the exported
+//     limit/usage gauges,
+//  2. exceed each limit and check the 400 rejection plus discarded-samples
+//     counter,
+//  3. add composite (multi-label) limits at runtime and verify usage is
+//     backfilled, enforced, and re-bootstrapped on config change,
+//  4. remove limits and verify their metrics disappear,
+//  5. restart the ingester and verify usage is recovered from the
+//     persisted blocks state.
+func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
+	limits := defaultLimitsTestConfig()
+	userID := "1"
+	registry := prometheus.NewRegistry()
+
+	limits.LimitsPerLabelSet = []validation.LimitsPerLabelSet{
+		{
+			LabelSet: labels.FromMap(map[string]string{
+				"label1": "value1",
+			}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 3,
+			},
+		},
+		{
+			LabelSet: labels.FromMap(map[string]string{
+				"label2": "value2",
+			}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 2,
+			},
+		},
+	}
+	tenantLimits := newMockTenantLimits(map[string]*validation.Limits{userID: &limits})
+
+	// Round-trip the limits through JSON so derived/internal fields (e.g. the
+	// labelset hash/id) are populated the same way they would be in production.
+	b, err := json.Marshal(limits)
+	require.NoError(t, err)
+	require.NoError(t, limits.UnmarshalJSON(b))
+
+	dir := t.TempDir()
+	chunksDir := filepath.Join(dir, "chunks")
+	blocksDir := filepath.Join(dir, "blocks")
+	require.NoError(t, os.Mkdir(chunksDir, os.ModePerm))
+	require.NoError(t, os.Mkdir(blocksDir, os.ModePerm))
+
+	ing, err := prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
+	// Wait until it's ACTIVE
+	test.Poll(t, time.Second, ring.ACTIVE, func() interface{} {
+		return ing.lifecycler.GetState()
+	})
+
+	ctx := user.InjectOrgID(context.Background(), userID)
+	samples := []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}
+
+	// Create first series within the limits
+	for _, set := range limits.LimitsPerLabelSet {
+		lbls := []string{labels.MetricName, "metric_name"}
+		for _, lbl := range set.LabelSet {
+			lbls = append(lbls, lbl.Name, lbl.Value)
+		}
+		for i := 0; i < set.Limits.MaxSeries; i++ {
+			_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+				[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+			require.NoError(t, err)
+		}
+	}
+
+	// Refresh the per-labelset gauges before scraping the registry.
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset"))
+
+	// Should impose limits
+	for _, set := range limits.LimitsPerLabelSet {
+		lbls := []string{labels.MetricName, "metric_name"}
+		for _, lbl := range set.LabelSet {
+			lbls = append(lbls, lbl.Name, lbl.Value)
+		}
+		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpbv2.API))
+		httpResp, ok := httpgrpc.HTTPResponseFromError(err)
+		require.True(t, ok, "returned error is not an httpgrpc response")
+		assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
+		// The rejection message must identify which labelset limit was hit.
+		require.ErrorContains(t, err, set.Id)
+	}
+
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 2
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total"))
+
+	// Should apply composite limits
+	limits.LimitsPerLabelSet = append(limits.LimitsPerLabelSet,
+		validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{
+			"comp1": "compValue1",
+		}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 10,
+			},
+		},
+		validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{
+			"comp2": "compValue2",
+		}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 10,
+			},
+		},
+		validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{
+			"comp1": "compValue1",
+			"comp2": "compValue2",
+		}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 2,
+			},
+		},
+	)
+
+	// Re-serialize and install the updated limits in the mock tenant override.
+	b, err = json.Marshal(limits)
+	require.NoError(t, err)
+	require.NoError(t, limits.UnmarshalJSON(b))
+	tenantLimits.setLimits(userID, &limits)
+
+	// Should backfill
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 2
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 0
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 0
+				cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 0
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total"))
+
+	// Adding 5 metrics with only 1 label
+	for i := 0; i < 5; i++ {
+		lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1"}
+		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+		require.NoError(t, err)
+	}
+
+	// Adding 2 metrics with both labels (still below the limit)
+	lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1", "comp2", "compValue2"}
+	for i := 0; i < 2; i++ {
+		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+		require.NoError(t, err)
+	}
+
+	// Now we should hit the limit as we already have 2 metrics with comp1=compValue1, comp2=compValue2
+	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpbv2.API))
+	httpResp, ok := httpgrpc.HTTPResponseFromError(err)
+	require.True(t, ok, "returned error is not an httpgrpc response")
+	assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
+	require.ErrorContains(t, err, labels.FromStrings("comp1", "compValue1", "comp2", "compValue2").String())
+
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_discarded_samples_total The total number of samples that were discarded.
+				# TYPE cortex_discarded_samples_total counter
+				cortex_discarded_samples_total{reason="per_labelset_series_limit",user="1"} 3
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 7
+				cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+		`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total"))
+
+	// Should bootstrap and apply limits when configuration change
+	limits.LimitsPerLabelSet = append(limits.LimitsPerLabelSet,
+		validation.LimitsPerLabelSet{LabelSet: labels.FromMap(map[string]string{
+			labels.MetricName: "metric_name",
+			"comp2":           "compValue2",
+		}),
+			Limits: validation.LimitsPerLabelSetEntry{
+				MaxSeries: 3, // we already have 2 so we need to allow 1 more
+			},
+		},
+	)
+
+	b, err = json.Marshal(limits)
+	require.NoError(t, err)
+	require.NoError(t, limits.UnmarshalJSON(b))
+	tenantLimits.setLimits(userID, &limits)
+
+	// One more series fits under the new MaxSeries=3 limit...
+	lbls = []string{labels.MetricName, "metric_name", "comp2", "compValue2"}
+	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate")...)}, samples, nil, nil, cortexpbv2.API))
+	require.NoError(t, err)
+
+	// ...but the next one must be rejected, naming the offending labelset.
+	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate2")...)}, samples, nil, nil, cortexpbv2.API))
+	httpResp, ok = httpgrpc.HTTPResponseFromError(err)
+	require.True(t, ok, "returned error is not an httpgrpc response")
+	assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
+	require.ErrorContains(t, err, labels.FromStrings(lbls...).String())
+
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{__name__=\"metric_name\", comp2=\"compValue2\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_limits_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 10
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{__name__=\"metric_name\", comp2=\"compValue2\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\", comp2=\"compValue2\"}",limit="max_series",user="1"} 2
+				cortex_ingester_usage_per_labelset{labelset="{comp1=\"compValue1\"}",limit="max_series",user="1"} 7
+				cortex_ingester_usage_per_labelset{labelset="{comp2=\"compValue2\"}",limit="max_series",user="1"} 3
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset"))
+
+	// Should remove metrics when the limits is removed
+	limits.LimitsPerLabelSet = limits.LimitsPerLabelSet[:2]
+	b, err = json.Marshal(limits)
+	require.NoError(t, err)
+	require.NoError(t, limits.UnmarshalJSON(b))
+	tenantLimits.setLimits(userID, &limits)
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset"))
+
+	// Should persist between restarts
+	services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+	registry = prometheus.NewRegistry()
+	ing, err = prepareIngesterWithBlocksStorageAndLimits(t, defaultIngesterTestConfig(t), limits, tenantLimits, blocksDir, registry, true)
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
+	ing.updateActiveSeries(ctx)
+	require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(`
+				# HELP cortex_ingester_limits_per_labelset Limits per user and labelset.
+				# TYPE cortex_ingester_limits_per_labelset gauge
+				cortex_ingester_limits_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_limits_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+				# HELP cortex_ingester_usage_per_labelset Current usage per user and labelset.
+				# TYPE cortex_ingester_usage_per_labelset gauge
+				cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3
+				cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2
+	`), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset"))
+	services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck
+
+}
+
+// Referred from https://github.com/prometheus/prometheus/blob/v2.52.1/model/histogram/histogram_test.go#L985.
+//
+// TestIngesterPRW2_PushNativeHistogramErrors pushes structurally invalid
+// native histograms via the remote-write v2 path (PushV2) and asserts that
+// each is rejected with a 400 wrapping the exact TSDB validation error from
+// Prometheus' histogram package, and that the failure is counted in the
+// ingestedSamplesFail metric.
+func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
+	metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
+	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
+	for _, tc := range []struct {
+		name        string
+		histograms  []cortexpbv2.Histogram
+		expectedErr error
+	}{
+		{
+			name: "rejects histogram with NaN observations that has its Count (2) lower than the actual total of buckets (2 + 1)",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					ZeroCount:       2,
+					Count:           2,
+					Sum:             math.NaN(),
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					PositiveBuckets: []int64{1},
+				}),
+			},
+			expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 2: %w", histogram.ErrHistogramCountNotBigEnough),
+		},
+		{
+			name: "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					ZeroCount:       2,
+					Count:           4,
+					Sum:             333,
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					PositiveBuckets: []int64{1},
+				}),
+			},
+			expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 4: %w", histogram.ErrHistogramCountMismatch),
+		},
+		{
+			name: "rejects histogram that has too few negative buckets",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					NegativeBuckets: []int64{},
+				}),
+			},
+			expectedErr: fmt.Errorf("negative side: spans need 1 buckets, have 0 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch),
+		},
+		{
+			name: "rejects histogram that has too few positive buckets",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					PositiveBuckets: []int64{},
+				}),
+			},
+			expectedErr: fmt.Errorf("positive side: spans need 1 buckets, have 0 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch),
+		},
+		{
+			name: "rejects histogram that has too many negative buckets",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					NegativeBuckets: []int64{1, 2},
+				}),
+			},
+			expectedErr: fmt.Errorf("negative side: spans need 1 buckets, have 2 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch),
+		},
+		{
+			name: "rejects histogram that has too many positive buckets",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
+					PositiveBuckets: []int64{1, 2},
+				}),
+			},
+			expectedErr: fmt.Errorf("positive side: spans need 1 buckets, have 2 buckets: %w", histogram.ErrHistogramSpansBucketsMismatch),
+		},
+		{
+			name: "rejects a histogram that has a negative span with a negative offset",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
+					NegativeBuckets: []int64{1, 2},
+				}),
+			},
+			expectedErr: fmt.Errorf("negative side: span number 2 with offset -1: %w", histogram.ErrHistogramSpanNegativeOffset),
+		},
+		{
+			name: "rejects a histogram that has a positive span with a negative offset",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
+					PositiveBuckets: []int64{1, 2},
+				}),
+			},
+			expectedErr: fmt.Errorf("positive side: span number 2 with offset -1: %w", histogram.ErrHistogramSpanNegativeOffset),
+		},
+		{
+			name: "rejects a histogram that has a negative span with a negative count",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					NegativeBuckets: []int64{-1},
+				}),
+			},
+			expectedErr: fmt.Errorf("negative side: bucket number 1 has observation count of -1: %w", histogram.ErrHistogramNegativeBucketCount),
+		},
+		{
+			name: "rejects a histogram that has a positive span with a negative count",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					PositiveBuckets: []int64{-1},
+				}),
+			},
+			expectedErr: fmt.Errorf("positive side: bucket number 1 has observation count of -1: %w", histogram.ErrHistogramNegativeBucketCount),
+		},
+		{
+			name: "rejects a histogram that has a lower count than count in buckets",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					Count:           0,
+					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					NegativeBuckets: []int64{1},
+					PositiveBuckets: []int64{1},
+				}),
+			},
+			expectedErr: fmt.Errorf("2 observations found in buckets, but the Count field is 0: %w", histogram.ErrHistogramCountMismatch),
+		},
+		{
+			name: "rejects a histogram that doesn't count the zero bucket in its count",
+			histograms: []cortexpbv2.Histogram{
+				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+					Count:           2,
+					ZeroCount:       1,
+					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
+					NegativeBuckets: []int64{1},
+					PositiveBuckets: []int64{1},
+				}),
+			},
+			expectedErr: fmt.Errorf("3 observations found in buckets, but the Count field is 2: %w", histogram.ErrHistogramCountMismatch),
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			registry := prometheus.NewRegistry()
+
+			// Create a mocked ingester
+			cfg := defaultIngesterTestConfig(t)
+			cfg.LifecyclerConfig.JoinAfter = 0
+
+			limits := defaultLimitsTestConfig()
+			i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry, true)
+			require.NoError(t, err)
+			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+			// userID is not declared in this function — presumably a
+			// package-level test constant shared across this file; verify
+			// against the surrounding test helpers.
+			ctx := user.InjectOrgID(context.Background(), userID)
+
+			// Wait until the ingester is ACTIVE
+			test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+				return i.lifecycler.GetState()
+			})
+
+			req := cortexpbv2.ToWriteRequestV2([]labels.Labels{metricLabels}, nil, tc.histograms, nil, cortexpbv2.API)
+			// Push timeseries
+			_, err = i.PushV2(ctx, req)
+			// The error must be the TSDB ingest error wrapped for the user,
+			// carrying the sample timestamp (10) and the series labels.
+			assert.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(tc.expectedErr, model.Time(10), metricLabelAdapters), userID).Error()), err)
+
+			require.Equal(t, testutil.ToFloat64(i.metrics.ingestedSamplesFail), float64(1))
+		})
+	}
+}
+
+// TestIngesterPRW2_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario
+// pushes the same series (two samples each) for two tenants over the
+// remote-write v2 path and asserts the ingester's own Prometheus metrics:
+// 4 ingested samples total, 0 failures, and per-user series
+// created/removed/active counts of 1/0/1 for each tenant.
+func TestIngesterPRW2_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *testing.T) {
+	metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
+	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
+	metricNames := []string{
+		"cortex_ingester_ingested_samples_total",
+		"cortex_ingester_ingested_samples_failures_total",
+		"cortex_ingester_memory_series",
+		"cortex_ingester_memory_users",
+		"cortex_ingester_memory_series_created_total",
+		"cortex_ingester_memory_series_removed_total",
+		"cortex_ingester_active_series",
+	}
+
+	registry := prometheus.NewRegistry()
+
+	// Create a mocked ingester
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 0
+
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, registry)
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until the ingester is ACTIVE
+	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push timeseries for each user
+	for _, userID := range []string{"test-1", "test-2"} {
+		// Two requests per tenant: the second appends a later sample (t=10)
+		// to the series created by the first (t=9), so each tenant ends up
+		// with one series and two ingested samples.
+		reqs := []*cortexpbv2.WriteRequest{
+			cortexpbv2.ToWriteRequestV2(
+				[]labels.Labels{metricLabels},
+				[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+				nil,
+				nil,
+				cortexpbv2.API),
+			cortexpbv2.ToWriteRequestV2(
+				[]labels.Labels{metricLabels},
+				[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+				nil,
+				nil,
+				cortexpbv2.API),
+		}
+
+		for _, req := range reqs {
+			ctx := user.InjectOrgID(context.Background(), userID)
+			_, err := i.PushV2(ctx, req)
+			require.NoError(t, err)
+		}
+	}
+
+	// Update active series for metrics check.
+	i.updateActiveSeries(context.Background())
+
+	// Check tracked Prometheus metrics
+	expectedMetrics := `
+		# HELP cortex_ingester_ingested_samples_total The total number of samples ingested.
+		# TYPE cortex_ingester_ingested_samples_total counter
+		cortex_ingester_ingested_samples_total 4
+		# HELP cortex_ingester_ingested_samples_failures_total The total number of samples that errored on ingestion.
+		# TYPE cortex_ingester_ingested_samples_failures_total counter
+		cortex_ingester_ingested_samples_failures_total 0
+		# HELP cortex_ingester_memory_users The current number of users in memory.
+		# TYPE cortex_ingester_memory_users gauge
+		cortex_ingester_memory_users 2
+		# HELP cortex_ingester_memory_series The current number of series in memory.
+		# TYPE cortex_ingester_memory_series gauge
+		cortex_ingester_memory_series 2
+		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+		# TYPE cortex_ingester_memory_series_created_total counter
+		cortex_ingester_memory_series_created_total{user="test-1"} 1
+		cortex_ingester_memory_series_created_total{user="test-2"} 1
+		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+		# TYPE cortex_ingester_memory_series_removed_total counter
+		cortex_ingester_memory_series_removed_total{user="test-1"} 0
+		cortex_ingester_memory_series_removed_total{user="test-2"} 0
+		# HELP cortex_ingester_active_series Number of currently active series per user.
+		# TYPE cortex_ingester_active_series gauge
+		cortex_ingester_active_series{user="test-1"} 1
+		cortex_ingester_active_series{user="test-2"} 1
+	`
+
+	assert.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(expectedMetrics), metricNames...))
+}
+
+// TestIngesterPRW2_Push_DecreaseInactiveSeries verifies that once the
+// ActiveSeriesMetricsIdleTimeout elapses, updateActiveSeries drops the
+// per-user cortex_ingester_active_series gauges to zero while the
+// created/removed series counters keep their original values.
+func TestIngesterPRW2_Push_DecreaseInactiveSeries(t *testing.T) {
+	metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
+	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
+	metricNames := []string{
+		"cortex_ingester_memory_series_created_total",
+		"cortex_ingester_memory_series_removed_total",
+		"cortex_ingester_active_series",
+	}
+
+	registry := prometheus.NewRegistry()
+
+	// Create a mocked ingester
+	cfg := defaultIngesterTestConfig(t)
+	cfg.ActiveSeriesMetricsIdleTimeout = 100 * time.Millisecond
+	cfg.LifecyclerConfig.JoinAfter = 0
+
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, registry)
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until the ingester is ACTIVE
+	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push timeseries for each user
+	for _, userID := range []string{"test-1", "test-2"} {
+		reqs := []*cortexpbv2.WriteRequest{
+			cortexpbv2.ToWriteRequestV2(
+				[]labels.Labels{metricLabels},
+				[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+				nil,
+				nil,
+				cortexpbv2.API),
+			cortexpbv2.ToWriteRequestV2(
+				[]labels.Labels{metricLabels},
+				[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+				nil,
+				nil,
+				cortexpbv2.API),
+		}
+
+		for _, req := range reqs {
+			ctx := user.InjectOrgID(context.Background(), userID)
+			_, err := i.PushV2(ctx, req)
+			require.NoError(t, err)
+		}
+	}
+
+	// Wait a bit to make series inactive (set to 100ms above).
+	time.Sleep(200 * time.Millisecond)
+
+	// Update active series for metrics check. This will remove inactive series.
+	i.updateActiveSeries(context.Background())
+
+	// Check tracked Prometheus metrics
+	expectedMetrics := `
+		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
+		# TYPE cortex_ingester_memory_series_created_total counter
+		cortex_ingester_memory_series_created_total{user="test-1"} 1
+		cortex_ingester_memory_series_created_total{user="test-2"} 1
+		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
+		# TYPE cortex_ingester_memory_series_removed_total counter
+		cortex_ingester_memory_series_removed_total{user="test-1"} 0
+		cortex_ingester_memory_series_removed_total{user="test-2"} 0
+		# HELP cortex_ingester_active_series Number of currently active series per user.
+		# TYPE cortex_ingester_active_series gauge
+		cortex_ingester_active_series{user="test-1"} 0
+		cortex_ingester_active_series{user="test-2"} 0
+	`
+
+	assert.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(expectedMetrics), metricNames...))
+}
+
+// BenchmarkIngesterPRW2Push benchmarks PushV2 with the default limits; no push
+// errors are expected in this configuration.
+func BenchmarkIngesterPRW2Push(b *testing.B) {
+	limits := defaultLimitsTestConfig()
+	benchmarkIngesterPRW2Push(b, limits, false)
+}
+
+// benchmarkIngesterPRW2Push measures PushV2 throughput against a blocks-storage
+// ingester configured with the given limits. When errorsExpected is true, push
+// errors are tolerated instead of failing the benchmark.
+func benchmarkIngesterPRW2Push(b *testing.B, limits validation.Limits, errorsExpected bool) {
+	registry := prometheus.NewRegistry()
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	// Create a mocked ingester
+	cfg := defaultIngesterTestConfig(b)
+	cfg.LifecyclerConfig.JoinAfter = 0
+
+	ingester, err := prepareIngesterWithBlocksStorage(b, cfg, registry)
+	require.NoError(b, err)
+	require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester))
+	defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck
+
+	// Wait until the ingester is ACTIVE
+	test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+		return ingester.lifecycler.GetState()
+	})
+
+	// Push a single time series to set the TSDB min time.
+	metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}
+	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
+	startTime := util.TimeToMillis(time.Now())
+
+	currTimeReq := cortexpbv2.ToWriteRequestV2(
+		[]labels.Labels{metricLabels},
+		[]cortexpbv2.Sample{{Value: 1, Timestamp: startTime}},
+		nil,
+		nil,
+		cortexpbv2.API)
+	_, err = ingester.PushV2(ctx, currTimeReq)
+	require.NoError(b, err)
+
+	const (
+		series  = 10000
+		samples = 10
+	)
+
+	allLabels, allSamples := benchmarkDataV2(series)
+
+	b.ResetTimer()
+	for iter := 0; iter < b.N; iter++ {
+		// Bump the timestamp on each of our test samples each time round the loop
+		// so every push appends strictly newer samples to the same series.
+		for j := 0; j < samples; j++ {
+			for i := range allSamples {
+				allSamples[i].Timestamp = startTime + int64(iter*samples+j+1)
+			}
+			_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(allLabels, allSamples, nil, nil, cortexpbv2.API))
+			if !errorsExpected {
+				require.NoError(b, err)
+			}
+		}
+	}
+}
+
+// Benchmark_IngesterPRW2_PushOnError benchmarks PushV2 under various failure
+// modes (out-of-bound / out-of-order samples, per-user and per-metric series
+// limits, instance limits) across several client-concurrency scenarios.
+//
+// Review fixes applied:
+//   - Removed the writev2.NewSymbolTable()/SymbolizeLabels loops whose results
+//     were discarded: they performed dead work inside the timed region of the
+//     benchmark and skewed the measurements.
+//   - Removed the duplicate b.ResetTimer() call; the timer is now reset once,
+//     immediately before the workers are unblocked.
+func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
+	var (
+		ctx             = user.InjectOrgID(context.Background(), userID)
+		sampleTimestamp = int64(100)
+		metricName      = "test"
+	)
+
+	scenarios := map[string]struct {
+		numSeriesPerRequest  int
+		numConcurrentClients int
+	}{
+		"no concurrency": {
+			numSeriesPerRequest:  1000,
+			numConcurrentClients: 1,
+		},
+		"low concurrency": {
+			numSeriesPerRequest:  1000,
+			numConcurrentClients: 100,
+		},
+		"high concurrency": {
+			numSeriesPerRequest:  1000,
+			numConcurrentClients: 1000,
+		},
+	}
+
+	instanceLimits := map[string]*InstanceLimits{
+		"no limits":  nil,
+		"limits set": {MaxIngestionRate: 1000, MaxInMemoryTenants: 1, MaxInMemorySeries: 1000, MaxInflightPushRequests: 1000}, // these match max values from scenarios
+	}
+
+	tests := map[string]struct {
+		// If this returns false, test is skipped.
+		prepareConfig   func(limits *validation.Limits, instanceLimits *InstanceLimits) bool
+		beforeBenchmark func(b *testing.B, ingester *Ingester, numSeriesPerRequest int)
+		runBenchmark    func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample)
+	}{
+		"out of bound samples": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true },
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// Push a single time series to set the TSDB min time.
+				currTimeReq := cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{{{Name: labels.MetricName, Value: metricName}}},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: util.TimeToMillis(time.Now())}},
+					nil,
+					nil,
+					cortexpbv2.API)
+				_, err := ingester.PushV2(ctx, currTimeReq)
+				require.NoError(b, err)
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				expectedErr := storage.ErrOutOfBounds.Error()
+				// Push out of bound samples.
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+
+					verifyErrorString(b, err, expectedErr)
+				}
+			},
+		},
+		"out of order samples": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true },
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// For each series, push a single sample with a timestamp greater than next pushes.
+				for i := 0; i < numSeriesPerRequest; i++ {
+					currTimeReq := cortexpbv2.ToWriteRequestV2(
+						[]labels.Labels{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}},
+						[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+						nil,
+						nil,
+						cortexpbv2.API)
+
+					_, err := ingester.PushV2(ctx, currTimeReq)
+					require.NoError(b, err)
+				}
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				expectedErr := storage.ErrOutOfOrderSample.Error()
+				// Push out of order samples.
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+
+					verifyErrorString(b, err, expectedErr)
+				}
+			},
+		},
+		"per-user series limit reached": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				limits.MaxLocalSeriesPerUser = 1
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// Push a series with a metric name different than the one used during the benchmark.
+				currTimeReq := cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{labels.FromStrings(labels.MetricName, "another")},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+					nil,
+					nil,
+					cortexpbv2.API)
+				_, err := ingester.PushV2(ctx, currTimeReq)
+				require.NoError(b, err)
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				// Push series with a different name than the one already pushed.
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "per-user series limit")
+				}
+			},
+		},
+		"per-metric series limit reached": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				limits.MaxLocalSeriesPerMetric = 1
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// Push a series with the same metric name but different labels than the one used during the benchmark.
+				currTimeReq := cortexpbv2.ToWriteRequestV2(
+					[]labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", "another")},
+					[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+					nil,
+					nil,
+					cortexpbv2.API)
+				_, err := ingester.PushV2(ctx, currTimeReq)
+				require.NoError(b, err)
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				// Push series with different labels than the one already pushed.
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "per-metric series limit")
+				}
+			},
+		},
+		"very low ingestion rate limit": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				if instanceLimits == nil {
+					return false
+				}
+				instanceLimits.MaxIngestionRate = 0.00001 // very low
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// Send a lot of samples
+				_, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000))
+				require.NoError(b, err)
+
+				ingester.ingestionRate.Tick()
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "push rate reached")
+				}
+			},
+		},
+		"max number of tenants reached": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				if instanceLimits == nil {
+					return false
+				}
+				instanceLimits.MaxInMemoryTenants = 1
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				// Send some samples for one tenant (not the same that is used during the test)
+				ctx := user.InjectOrgID(context.Background(), "different_tenant")
+				_, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000))
+				require.NoError(b, err)
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "max tenants limit reached")
+				}
+			},
+		},
+		"max number of series reached": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				if instanceLimits == nil {
+					return false
+				}
+				instanceLimits.MaxInMemorySeries = 1
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				_, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000))
+				require.NoError(b, err)
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "max series limit reached")
+				}
+			},
+		},
+		"max inflight requests reached": {
+			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool {
+				if instanceLimits == nil {
+					return false
+				}
+				instanceLimits.MaxInflightPushRequests = 1
+				return true
+			},
+			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
+				ingester.inflightPushRequests.Inc()
+			},
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+				for n := 0; n < b.N; n++ {
+					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					verifyErrorString(b, err, "too many inflight push requests")
+				}
+			},
+		},
+	}
+
+	for testName, testData := range tests {
+		for scenarioName, scenario := range scenarios {
+			for limitsName, limits := range instanceLimits {
+				b.Run(fmt.Sprintf("failure: %s, scenario: %s, limits: %s", testName, scenarioName, limitsName), func(b *testing.B) {
+					registry := prometheus.NewRegistry()
+
+					instanceLimits := limits
+					if instanceLimits != nil {
+						// make a copy, to avoid changing value in the instanceLimits map.
+						newLimits := &InstanceLimits{}
+						*newLimits = *instanceLimits
+						instanceLimits = newLimits
+					}
+
+					// Create a mocked ingester
+					cfg := defaultIngesterTestConfig(b)
+					cfg.LifecyclerConfig.JoinAfter = 0
+
+					limits := defaultLimitsTestConfig()
+					if !testData.prepareConfig(&limits, instanceLimits) {
+						b.SkipNow()
+					}
+
+					cfg.InstanceLimitsFn = func() *InstanceLimits {
+						return instanceLimits
+					}
+
+					ingester, err := prepareIngesterWithBlocksStorageAndLimits(b, cfg, limits, nil, "", registry, true)
+					require.NoError(b, err)
+					require.NoError(b, services.StartAndAwaitRunning(context.Background(), ingester))
+					defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck
+
+					// Wait until the ingester is ACTIVE
+					test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
+						return ingester.lifecycler.GetState()
+					})
+
+					testData.beforeBenchmark(b, ingester, scenario.numSeriesPerRequest)
+
+					// Prepare the request.
+					metrics := make([]labels.Labels, 0, scenario.numSeriesPerRequest)
+					samples := make([]cortexpbv2.Sample, 0, scenario.numSeriesPerRequest)
+					for i := 0; i < scenario.numSeriesPerRequest; i++ {
+						metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}})
+						samples = append(samples, cortexpbv2.Sample{Value: float64(i), Timestamp: sampleTimestamp})
+					}
+
+					// Run the benchmark.
+					wg := sync.WaitGroup{}
+					wg.Add(scenario.numConcurrentClients)
+					start := make(chan struct{})
+
+					b.ReportAllocs()
+
+					for c := 0; c < scenario.numConcurrentClients; c++ {
+						go func() {
+							defer wg.Done()
+							<-start
+
+							testData.runBenchmark(b, ingester, metrics, samples)
+						}()
+					}
+
+					// Reset the timer once, right before unblocking the workers, so
+					// goroutine spawning is excluded from the measurement.
+					b.ResetTimer()
+					close(start)
+					wg.Wait()
+				})
+			}
+		}
+	}
+}
+
+// TestIngesterPRW2_LabelNames pushes three v2 series and verifies LabelNames
+// returns the expected sorted label names, both unlimited and truncated by the
+// request Limit.
+func TestIngesterPRW2_LabelNames(t *testing.T) {
+	series := []struct {
+		lbls      labels.Labels
+		value     float64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+	}
+
+	expected := []string{"__name__", "route", "status"}
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	for _, series := range series {
+		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
+		_, err := i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	tests := map[string]struct {
+		limit    int
+		expected []string
+	}{
+		"should return all label names if no limit is set": {
+			expected: expected,
+		},
+		"should return limited label names if a limit is set": {
+			limit:    2,
+			expected: expected[:2],
+		},
+	}
+
+	for testName, testData := range tests {
+		t.Run(testName, func(t *testing.T) {
+			// Get label names
+			res, err := i.LabelNames(ctx, &client.LabelNamesRequest{Limit: int64(testData.limit)})
+			require.NoError(t, err)
+			assert.ElementsMatch(t, testData.expected, res.LabelNames)
+		})
+	}
+}
+
+// TestIngesterPRW2_LabelValues pushes three v2 series and verifies LabelValues
+// returns the expected values per label name, honoring the request Limit when
+// it is smaller than the number of values.
+func TestIngesterPRW2_LabelValues(t *testing.T) {
+	series := []struct {
+		lbls      labels.Labels
+		value     float64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+	}
+
+	expected := map[string][]string{
+		"__name__": {"test_1", "test_2"},
+		"status":   {"200", "500"},
+		"route":    {"get_user"},
+		"unknown":  {},
+	}
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	for _, series := range series {
+		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
+		_, err := i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	tests := map[string]struct {
+		limit int64
+		// NOTE(review): match is declared but not used by any test case below.
+		match []*labels.Matcher
+	}{
+		"should return all label values if no limit is set": {
+			limit: 0,
+		},
+		"should return limited label values if a limit is set": {
+			limit: 1,
+		},
+	}
+
+	for testName, testData := range tests {
+		t.Run(testName, func(t *testing.T) {
+			for labelName, expectedValues := range expected {
+				req := &client.LabelValuesRequest{LabelName: labelName, Limit: testData.limit}
+				res, err := i.LabelValues(ctx, req)
+				require.NoError(t, err)
+				// Truncate the expectation the same way the ingester truncates the response.
+				if testData.limit > 0 && len(expectedValues) > int(testData.limit) {
+					expectedValues = expectedValues[:testData.limit]
+				}
+				assert.ElementsMatch(t, expectedValues, res.LabelValues)
+			}
+		})
+	}
+
+}
+
+// TestIngesterPRW2_LabelValue_MaxInflightQueryRequest verifies that LabelValues
+// is rejected with errTooManyInflightQueryRequests when the inflight query
+// counter already sits at the configured limit, while PushV2 still succeeds.
+func TestIngesterPRW2_LabelValue_MaxInflightQueryRequest(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.DefaultLimits.MaxInflightQueryRequests = 1
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Saturate the inflight query limit before issuing the read.
+	i.inflightQueryRequests.Add(1)
+
+	// Mock request
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	wreq, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000)
+	_, err = i.PushV2(ctx, wreq)
+	require.NoError(t, err)
+
+	rreq := &client.LabelValuesRequest{}
+	_, err = i.LabelValues(ctx, rreq)
+	require.Error(t, err)
+	require.Equal(t, err, errTooManyInflightQueryRequests)
+}
+
+// Test_IngesterPRW2_Query pushes three v2 series and exercises QueryStream with
+// every matcher type (==, !=, =~, !~), multiple matchers, and a restricted time
+// range, comparing the streamed result against the expected time series.
+//
+// Review fix: removed the leftover debug fmt.Println statements that polluted
+// test output on every sub-test.
+func Test_IngesterPRW2_Query(t *testing.T) {
+	series := []struct {
+		lbls      labels.Labels
+		value     float64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+	}
+
+	tests := map[string]struct {
+		from     int64
+		to       int64
+		matchers []*client.LabelMatcher
+		expected []cortexpb.TimeSeries
+	}{
+		"should return an empty response if no metric matches": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "unknown"},
+			},
+			expected: []cortexpb.TimeSeries{},
+		},
+		"should filter series by == matcher": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[0].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 100000}}},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[1].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 110000}}},
+			},
+		},
+		"should filter series by != matcher": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.NOT_EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[2].lbls), Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 200000}}},
+			},
+		},
+		"should filter series by =~ matcher": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: ".*_1"},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[0].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 100000}}},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[1].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 110000}}},
+			},
+		},
+		"should filter series by !~ matcher": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.REGEX_NO_MATCH, Name: model.MetricNameLabel, Value: ".*_1"},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[2].lbls), Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 200000}}},
+			},
+		},
+		"should filter series by multiple matchers": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatcher{
+				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+				{Type: client.REGEX_MATCH, Name: "status", Value: "5.."},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[1].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 110000}}},
+			},
+		},
+		"should filter series by matcher and time range": {
+			from: 100000,
+			to:   100000,
+			matchers: []*client.LabelMatcher{
+				{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+			},
+			expected: []cortexpb.TimeSeries{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(series[0].lbls), Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 100000}}},
+			},
+		},
+	}
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	for _, series := range series {
+		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
+		_, err := i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	// Run tests
+	for testName, testData := range tests {
+		t.Run(testName, func(t *testing.T) {
+			req := &client.QueryRequest{
+				StartTimestampMs: testData.from,
+				EndTimestampMs:   testData.to,
+				Matchers:         testData.matchers,
+			}
+
+			s := &mockQueryStreamServer{ctx: ctx}
+			err = i.QueryStream(req, s)
+			require.NoError(t, err)
+			set, err := seriesSetFromResponseStream(s)
+			require.NoError(t, err)
+			r, err := client.SeriesSetToQueryResponse(set)
+			require.NoError(t, err)
+			assert.ElementsMatch(t, testData.expected, r.Timeseries)
+		})
+	}
+}
+
+// TestIngesterPRW2_Query_MaxInflightQueryRequest verifies that QueryStream is
+// rejected with errTooManyInflightQueryRequests when the inflight query counter
+// already sits at the configured limit, while PushV2 still succeeds.
+func TestIngesterPRW2_Query_MaxInflightQueryRequest(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.DefaultLimits.MaxInflightQueryRequests = 1
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Saturate the inflight query limit before issuing the read.
+	i.inflightQueryRequests.Add(1)
+
+	// Mock request
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	wreq, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000)
+	_, err = i.PushV2(ctx, wreq)
+	require.NoError(t, err)
+
+	rreq := &client.QueryRequest{}
+	s := &mockQueryStreamServer{ctx: ctx}
+	err = i.QueryStream(rreq, s)
+	require.Error(t, err)
+	require.Equal(t, err, errTooManyInflightQueryRequests)
+}
+
+// TestIngesterPRW2_Push_ShouldNotCreateTSDBIfNotInActiveState verifies that a
+// v2 push against an ingester still in the PENDING ring state is rejected and,
+// crucially, does not lazily create a per-user TSDB.
+func TestIngesterPRW2_Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) {
+	// Configure the lifecycler to not immediately join the ring, to make sure
+	// the ingester will NOT be in the ACTIVE state when we'll push samples.
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 10 * time.Second
+
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+	require.Equal(t, ring.PENDING, i.lifecycler.GetState())
+
+	// Mock request
+	userID := "test"
+	ctx := user.InjectOrgID(context.Background(), userID)
+	req := &cortexpbv2.WriteRequest{}
+
+	res, err := i.PushV2(ctx, req)
+	assert.Equal(t, wrapWithUser(fmt.Errorf(errTSDBCreateIncompatibleState, "PENDING"), userID).Error(), err.Error())
+	assert.Nil(t, res)
+
+	// Check if the TSDB has been created
+	_, tsdbCreated := i.TSDBState.dbs[userID]
+	assert.False(t, tsdbCreated)
+}
+
+// TestIngesterPRW2_MetricsForLabelMatchers pushes a fixed set of series via the
+// remote-write v2 Push path and then exercises MetricsForLabelMatchers:
+// matcher filtering, multi-matcher union, deduplication on overlapping
+// matchers, result limits, fingerprint collisions, and the
+// queryIngestersWithin time-range behavior.
+func TestIngesterPRW2_MetricsForLabelMatchers(t *testing.T) {
+	fixtures := []struct {
+		lbls      labels.Labels
+		value     float64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+		// The two following series have the same FastFingerprint=e002a3a451262627
+		{labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
+		{labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000},
+	}
+
+	tests := map[string]struct {
+		from                 int64
+		to                   int64
+		limit                int64
+		matchers             []*client.LabelMatchers
+		expected             []*cortexpb.Metric
+		queryIngestersWithin time.Duration
+	}{
+		"should return an empty response if no metric match": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatchers{{
+				Matchers: []*client.LabelMatcher{
+					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "unknown"},
+				},
+			}},
+			expected: []*cortexpb.Metric{},
+		},
+		"should filter metrics by single matcher": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatchers{{
+				Matchers: []*client.LabelMatcher{
+					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+				},
+			}},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[1].lbls)},
+			},
+		},
+		"should filter metrics by multiple matchers": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatchers{
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.EQUAL, Name: "status", Value: "200"},
+					},
+				},
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_2"},
+					},
+				},
+			},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[2].lbls)},
+			},
+		},
+		"should NOT filter metrics by time range to always return known metrics even when queried for older time ranges": {
+			from: 100,
+			to:   1000,
+			matchers: []*client.LabelMatchers{{
+				Matchers: []*client.LabelMatcher{
+					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+				},
+			}},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[1].lbls)},
+			},
+		},
+		"should filter metrics by time range if queryIngestersWithin is enabled": {
+			from: 99999,
+			to:   100001,
+			matchers: []*client.LabelMatchers{{
+				Matchers: []*client.LabelMatcher{
+					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+				},
+			}},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+			},
+			queryIngestersWithin: time.Hour,
+		},
+		"should not return duplicated metrics on overlapping matchers": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatchers{
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"},
+					},
+				},
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
+					},
+				},
+			},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[1].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[2].lbls)},
+			},
+		},
+		"should return all matching metrics even if their FastFingerprint collide": {
+			from: math.MinInt64,
+			to:   math.MaxInt64,
+			matchers: []*client.LabelMatchers{{
+				Matchers: []*client.LabelMatcher{
+					{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "collision"},
+				},
+			}},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[3].lbls)},
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[4].lbls)},
+			},
+		},
+		"should return only limited results": {
+			from:  math.MinInt64,
+			to:    math.MaxInt64,
+			limit: 1,
+			matchers: []*client.LabelMatchers{
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.EQUAL, Name: "status", Value: "200"},
+					},
+				},
+				{
+					Matchers: []*client.LabelMatcher{
+						{Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_2"},
+					},
+				},
+			},
+			expected: []*cortexpb.Metric{
+				{Labels: cortexpb.FromLabelsToLabelAdapters(fixtures[0].lbls)},
+			},
+		},
+	}
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push fixtures
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	for _, series := range fixtures {
+		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
+		_, err := i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	// Run tests
+	for testName, testData := range tests {
+		testData := testData
+
+		t.Run(testName, func(t *testing.T) {
+			req := &client.MetricsForLabelMatchersRequest{
+				StartTimestampMs: testData.from,
+				EndTimestampMs:   testData.to,
+				MatchersSet:      testData.matchers,
+				Limit:            testData.limit,
+			}
+			// NOTE: this mutates the shared ingester config; safe only because
+			// the subtests run sequentially (no t.Parallel).
+			i.cfg.QueryIngestersWithin = testData.queryIngestersWithin
+			res, err := i.MetricsForLabelMatchers(ctx, req)
+			require.NoError(t, err)
+			assert.ElementsMatch(t, testData.expected, res.Metric)
+		})
+	}
+}
+
+// TestIngesterPRW2_MetricsForLabelMatchers_Deduplication pushes numSeries
+// series (via the remote-write v2 helper) and queries them back with two
+// overlapping matcher sets, verifying that every series is returned exactly
+// once despite matching both sets.
+func TestIngesterPRW2_MetricsForLabelMatchers_Deduplication(t *testing.T) {
+	const (
+		userID    = "test"
+		numSeries = 100000
+	)
+
+	now := util.TimeToMillis(time.Now())
+	i := createIngesterWithSeriesV2(t, userID, numSeries, 1, now, 1)
+	// Use the userID constant (not a duplicated literal) so the tenant queried
+	// is guaranteed to match the tenant the series were written for.
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	req := &client.MetricsForLabelMatchersRequest{
+		StartTimestampMs: now,
+		EndTimestampMs:   now,
+		// Overlapping matchers to make sure series are correctly deduplicated.
+		MatchersSet: []*client.LabelMatchers{
+			{Matchers: []*client.LabelMatcher{
+				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
+			}},
+			{Matchers: []*client.LabelMatcher{
+				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*0"},
+			}},
+		},
+	}
+
+	res, err := i.MetricsForLabelMatchers(ctx, req)
+	require.NoError(t, err)
+	// Each of the numSeries series must appear exactly once.
+	require.Len(t, res.GetMetric(), numSeries)
+}
+
+// BenchmarkIngesterPRW2_MetricsForLabelMatchers benchmarks the
+// MetricsForLabelMatchers endpoint over compacted blocks containing series
+// ingested through the remote-write v2 path.
+func BenchmarkIngesterPRW2_MetricsForLabelMatchers(b *testing.B) {
+	var (
+		userID              = "test"
+		numSeries           = 10000
+		numSamplesPerSeries = 60 * 6 // 6h on 1 sample per minute
+		startTimestamp      = util.TimeToMillis(time.Now())
+		step                = int64(60000) // 1 sample per minute
+	)
+
+	i := createIngesterWithSeriesV2(b, userID, numSeries, numSamplesPerSeries, startTimestamp, step)
+	// Use the userID variable (rather than repeating the literal) so the tenant
+	// queried always matches the tenant the series were created for.
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	// Flush the ingester to ensure blocks have been compacted, so we'll test
+	// fetching labels from blocks.
+	i.Flush()
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for n := 0; n < b.N; n++ {
+		req := &client.MetricsForLabelMatchersRequest{
+			StartTimestampMs: math.MinInt64,
+			EndTimestampMs:   math.MaxInt64,
+			MatchersSet: []*client.LabelMatchers{{Matchers: []*client.LabelMatcher{
+				{Type: client.REGEX_MATCH, Name: model.MetricNameLabel, Value: "test.*"},
+			}}},
+		}
+
+		res, err := i.MetricsForLabelMatchers(ctx, req)
+		require.NoError(b, err)
+		require.Len(b, res.GetMetric(), numSeries)
+	}
+}
+
+// TestIngesterPRW2_QueryStream pushes one series per supported chunk encoding
+// through the remote-write v2 Push path, then queries it back over gRPC
+// streaming and compares the streamed response against the expected chunks
+// produced by the mock write helpers.
+func TestIngesterPRW2_QueryStream(t *testing.T) {
+	// Create ingester.
+	cfg := defaultIngesterTestConfig(t)
+
+	for _, enc := range encodings {
+		t.Run(enc.String(), func(t *testing.T) {
+			i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+			require.NoError(t, err)
+			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+			// Wait until it's ACTIVE.
+			test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+				return i.lifecycler.GetState()
+			})
+
+			// Push series.
+			ctx := user.InjectOrgID(context.Background(), userID)
+			lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}}
+			var (
+				req                    *cortexpbv2.WriteRequest
+				expectedResponseChunks *client.QueryStreamResponse
+			)
+			switch enc {
+			case encoding.PrometheusXorChunk:
+				req, expectedResponseChunks = mockWriteRequestV2(t, lbls, 123000, 456)
+			case encoding.PrometheusHistogramChunk:
+				req, expectedResponseChunks = mockHistogramWriteRequestV2(t, lbls, 123000, 456, false)
+			case encoding.PrometheusFloatHistogramChunk:
+				req, expectedResponseChunks = mockHistogramWriteRequestV2(t, lbls, 123000, 456, true)
+			}
+			_, err = i.PushV2(ctx, req)
+			require.NoError(t, err)
+
+			// Create a GRPC server used to query back the data.
+			serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor))
+			defer serv.GracefulStop()
+			client.RegisterIngesterServer(serv, i)
+
+			listener, err := net.Listen("tcp", "localhost:0")
+			require.NoError(t, err)
+
+			go func() {
+				// Use assert (not require) here: require.NoError calls
+				// t.FailNow, which must only be called from the goroutine
+				// running the test function.
+				assert.NoError(t, serv.Serve(listener))
+			}()
+
+			// Query back the series using GRPC streaming.
+			c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig())
+			require.NoError(t, err)
+			defer c.Close()
+
+			queryRequest := &client.QueryRequest{
+				StartTimestampMs: 0,
+				EndTimestampMs:   200000,
+				Matchers: []*client.LabelMatcher{{
+					Type:  client.EQUAL,
+					Name:  model.MetricNameLabel,
+					Value: "foo",
+				}},
+			}
+
+			chunksTest := func(t *testing.T) {
+				s, err := c.QueryStream(ctx, queryRequest)
+				require.NoError(t, err)
+
+				count := 0
+				var lastResp *client.QueryStreamResponse
+				for {
+					resp, err := s.Recv()
+					if err == io.EOF {
+						break
+					}
+					require.NoError(t, err)
+					count += len(resp.Chunkseries)
+					lastResp = resp
+				}
+				// Exactly one chunk series is expected, carried by the last
+				// (and only) non-EOF response.
+				require.Equal(t, 1, count)
+				require.Equal(t, expectedResponseChunks, lastResp)
+			}
+
+			t.Run("chunks", chunksTest)
+		})
+	}
+}
+
+// TestIngesterPRW2_QueryStreamManySamplesChunks pushes three series with
+// 100k, 1M and 500k samples through the v2 Push path, queries them back over
+// gRPC streaming, and verifies message batching, the series count, and the
+// total number of samples decoded from the returned chunks.
+func TestIngesterPRW2_QueryStreamManySamplesChunks(t *testing.T) {
+	// Create ingester.
+	cfg := defaultIngesterTestConfig(t)
+
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE.
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series.
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	const samplesCount = 1000000
+	samples := make([]cortexpbv2.Sample, 0, samplesCount)
+
+	// Use a distinct loop variable so the ingester variable `i` is not shadowed.
+	for j := 0; j < samplesCount; j++ {
+		samples = append(samples, cortexpbv2.Sample{
+			Value:     float64(j),
+			Timestamp: int64(j),
+		})
+	}
+
+	// 100k samples in chunks use about 154 KiB,
+	_, err = i.PushV2(ctx, writeRequestSingleSeriesV2(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "1"}}, samples[0:100000]))
+	require.NoError(t, err)
+
+	// 1M samples in chunks use about 1.51 MiB,
+	_, err = i.PushV2(ctx, writeRequestSingleSeriesV2(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "2"}}, samples))
+	require.NoError(t, err)
+
+	// 500k samples in chunks need 775 KiB,
+	_, err = i.PushV2(ctx, writeRequestSingleSeriesV2(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "3"}}, samples[0:500000]))
+	require.NoError(t, err)
+
+	// Create a GRPC server used to query back the data.
+	serv := grpc.NewServer(grpc.StreamInterceptor(middleware.StreamServerUserHeaderInterceptor))
+	defer serv.GracefulStop()
+	client.RegisterIngesterServer(serv, i)
+
+	listener, err := net.Listen("tcp", "localhost:0")
+	require.NoError(t, err)
+
+	go func() {
+		// Use assert (not require) here: require.NoError calls t.FailNow,
+		// which must only be called from the goroutine running the test.
+		assert.NoError(t, serv.Serve(listener))
+	}()
+
+	// Query back the series using GRPC streaming.
+	c, err := client.MakeIngesterClient(listener.Addr().String(), defaultClientTestConfig())
+	require.NoError(t, err)
+	defer c.Close()
+
+	s, err := c.QueryStream(ctx, &client.QueryRequest{
+		StartTimestampMs: 0,
+		EndTimestampMs:   samplesCount + 1,
+
+		Matchers: []*client.LabelMatcher{{
+			Type:  client.EQUAL,
+			Name:  model.MetricNameLabel,
+			Value: "foo",
+		}},
+	})
+	require.NoError(t, err)
+
+	recvMsgs := 0
+	series := 0
+	totalSamples := 0
+
+	for {
+		resp, err := s.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+		require.True(t, len(resp.Chunkseries) > 0) // No empty messages.
+
+		recvMsgs++
+		series += len(resp.Chunkseries)
+
+		// Decode every returned chunk to count the samples actually shipped.
+		for _, ts := range resp.Chunkseries {
+			for _, c := range ts.Chunks {
+				enc := encoding.Encoding(c.Encoding).PromChunkEncoding()
+				require.True(t, enc != chunkenc.EncNone)
+				chk, err := chunkenc.FromData(enc, c.Data)
+				require.NoError(t, err)
+				totalSamples += chk.NumSamples()
+			}
+		}
+	}
+
+	// As ingester doesn't guarantee sorting of series, we can get 2 (100k + 500k in first, 1M in second)
+	// or 3 messages (100k or 500k first, 1M second, and 500k or 100k last).
+
+	require.True(t, 2 <= recvMsgs && recvMsgs <= 3)
+	require.Equal(t, 3, series)
+	require.Equal(t, 100000+500000+samplesCount, totalSamples)
+}
+
+// BenchmarkIngesterPRW2_QueryStream_Chunks benchmarks QueryStream over series
+// ingested through the remote-write v2 path.
+func BenchmarkIngesterPRW2_QueryStream_Chunks(b *testing.B) {
+	benchmarkQueryStreamV2(b)
+}
+
+// benchmarkQueryStreamV2 measures QueryStream performance over 100 series of
+// 1k samples each, all ingested via the remote-write v2 Push path and served
+// to a mock stream server.
+func benchmarkQueryStreamV2(b *testing.B) {
+	cfg := defaultIngesterTestConfig(b)
+
+	// Create ingester.
+	i, err := prepareIngesterWithBlocksStorage(b, cfg, prometheus.NewRegistry())
+	require.NoError(b, err)
+	require.NoError(b, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE.
+	test.Poll(b, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series.
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	const samplesCount = 1000
+	samples := make([]cortexpbv2.Sample, 0, samplesCount)
+
+	// Use a distinct loop variable so the ingester variable `i` is not shadowed.
+	for j := 0; j < samplesCount; j++ {
+		samples = append(samples, cortexpbv2.Sample{
+			Value:     float64(j),
+			Timestamp: int64(j),
+		})
+	}
+
+	const seriesCount = 100
+	for s := 0; s < seriesCount; s++ {
+		_, err = i.PushV2(ctx, writeRequestSingleSeriesV2(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: strconv.Itoa(s)}}, samples))
+		require.NoError(b, err)
+	}
+
+	req := &client.QueryRequest{
+		StartTimestampMs: 0,
+		EndTimestampMs:   samplesCount + 1,
+
+		Matchers: []*client.LabelMatcher{{
+			Type:  client.EQUAL,
+			Name:  model.MetricNameLabel,
+			Value: "foo",
+		}},
+	}
+
+	mockStream := &mockQueryStreamServer{ctx: ctx}
+
+	b.ResetTimer()
+
+	for ix := 0; ix < b.N; ix++ {
+		err := i.QueryStream(req, mockStream)
+		require.NoError(b, err)
+	}
+}
+
+// TestIngesterPRW2_dontShipBlocksWhenTenantDeletionMarkerIsPresent verifies
+// that once a tenant deletion mark exists in the bucket, compacted blocks for
+// that tenant are no longer shipped (the bucket object count stays unchanged)
+// and the tenant's TSDB is closed with the tsdbTenantMarkedForDeletion reason.
+func TestIngesterPRW2_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 0
+	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+
+	// Use in-memory bucket.
+	bucket := objstore.NewInMemBucket()
+
+	i.TSDBState.bucket = bucket
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// First round: push, compact and ship — the block must reach the bucket.
+	pushSingleSampleWithMetadataV2(t, i)
+	require.Equal(t, int64(1), i.TSDBState.seriesCount.Load())
+	i.compactBlocks(context.Background(), true, nil)
+	require.Equal(t, int64(0), i.TSDBState.seriesCount.Load())
+	i.shipBlocks(context.Background(), nil)
+
+	numObjects := len(bucket.Objects())
+	require.NotZero(t, numObjects)
+
+	require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, cortex_tsdb.NewTenantDeletionMark(time.Now())))
+	numObjects++ // For deletion marker
+
+	db := i.getTSDB(userID)
+	require.NotNil(t, db)
+	// Force the next shipBlocks call to re-check the deletion mark.
+	db.lastDeletionMarkCheck.Store(0)
+
+	// After writing tenant deletion mark, a second compact+ship cycle must not
+	// upload anything new.
+	pushSingleSampleWithMetadataV2(t, i)
+	require.Equal(t, int64(1), i.TSDBState.seriesCount.Load())
+	i.compactBlocks(context.Background(), true, nil)
+	require.Equal(t, int64(0), i.TSDBState.seriesCount.Load())
+	i.shipBlocks(context.Background(), nil)
+
+	numObjectsAfterMarkingTenantForDeletion := len(bucket.Objects())
+	require.Equal(t, numObjects, numObjectsAfterMarkingTenantForDeletion)
+	require.Equal(t, tsdbTenantMarkedForDeletion, i.closeAndDeleteUserTSDBIfIdle(userID))
+}
+
+// TestIngesterPRW2_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant checks
+// that closing a non-idle TSDB for a tenant marked for deletion succeeds and
+// that the ingester-wide series counter is decremented accordingly.
+func TestIngesterPRW2_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 0
+	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+
+	// Use in-memory bucket.
+	bucket := objstore.NewInMemBucket()
+
+	// Write tenant deletion mark.
+	require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, cortex_tsdb.NewTenantDeletionMark(time.Now())))
+
+	i.TSDBState.bucket = bucket
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	pushSingleSampleWithMetadataV2(t, i)
+	require.Equal(t, int64(1), i.TSDBState.seriesCount.Load())
+
+	// We call shipBlocks to check for deletion marker (it happens inside this method).
+	i.shipBlocks(context.Background(), nil)
+
+	// Verify that tenant deletion mark was found.
+	db := i.getTSDB(userID)
+	require.NotNil(t, db)
+	require.True(t, db.deletionMarkFound.Load())
+
+	// If we try to close TSDB now, it should succeed, even though TSDB is not idle and empty.
+	require.Equal(t, uint64(1), db.Head().NumSeries())
+	require.Equal(t, tsdbTenantMarkedForDeletion, i.closeAndDeleteUserTSDBIfIdle(userID))
+
+	// Closing should decrease series count.
+	require.Equal(t, int64(0), i.TSDBState.seriesCount.Load())
+}
+
+// TestIngesterPRW2_invalidSamplesDontChangeLastUpdateTime verifies that a
+// rejected v2 push (same series/timestamp with a different value) does not
+// refresh the per-tenant TSDB lastUpdate timestamp, so invalid writes cannot
+// keep an otherwise-idle TSDB alive.
+func TestIngesterPRW2_invalidSamplesDontChangeLastUpdateTime(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 0
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
+	require.NoError(t, err)
+
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	ctx := user.InjectOrgID(context.Background(), userID)
+	sampleTimestamp := int64(model.Now())
+
+	{
+		req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, sampleTimestamp)
+		_, err = i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	db := i.getTSDB(userID)
+	// Fail with a clear assertion instead of a nil-pointer panic if the TSDB
+	// was unexpectedly not created by the push above.
+	require.NotNil(t, db)
+	lastUpdate := db.lastUpdate.Load()
+
+	// Wait until 1 second passes.
+	test.Poll(t, 1*time.Second, time.Now().Unix()+1, func() interface{} {
+		return time.Now().Unix()
+	})
+
+	// Push another sample to the same metric and timestamp, with different value. We expect to get error.
+	{
+		req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 1, sampleTimestamp)
+		_, err = i.PushV2(ctx, req)
+		require.Error(t, err)
+	}
+
+	// Make sure last update hasn't changed.
+	require.Equal(t, lastUpdate, db.lastUpdate.Load())
+}
+
+// TestIngesterPRW2_flushing exercises the different ways blocks get flushed
+// and shipped after a remote-write v2 push: on ingester shutdown, via the
+// shutdown handler, via the flush handler (optionally restricted to a list of
+// tenants), and flushing data spanning multiple block ranges. Each case
+// asserts on the cortex_ingester_shipper_uploads_total metric and on whether
+// the head was compacted.
+func TestIngesterPRW2_flushing(t *testing.T) {
+	for name, tc := range map[string]struct {
+		setupIngester func(cfg *Config)
+		action        func(t *testing.T, i *Ingester, reg *prometheus.Registry)
+	}{
+		"ingesterShutdown": {
+			setupIngester: func(cfg *Config) {
+				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = true
+				cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown = true
+			},
+			action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) {
+				pushSingleSampleWithMetadataV2(t, i)
+
+				// Nothing shipped yet.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 0
+				`), "cortex_ingester_shipper_uploads_total"))
+
+				// Shutdown ingester. This triggers flushing of the block.
+				require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
+
+				verifyCompactedHead(t, i, true)
+
+				// Verify that block has been shipped.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 1
+				`), "cortex_ingester_shipper_uploads_total"))
+			},
+		},
+
+		"shutdownHandler": {
+			setupIngester: func(cfg *Config) {
+				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
+				cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown = true
+			},
+
+			action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) {
+				pushSingleSampleWithMetadataV2(t, i)
+
+				// Nothing shipped yet.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+		# TYPE cortex_ingester_shipper_uploads_total counter
+		cortex_ingester_shipper_uploads_total 0
+	`), "cortex_ingester_shipper_uploads_total"))
+
+				i.ShutdownHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/shutdown", nil))
+
+				verifyCompactedHead(t, i, true)
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+		# TYPE cortex_ingester_shipper_uploads_total counter
+		cortex_ingester_shipper_uploads_total 1
+	`), "cortex_ingester_shipper_uploads_total"))
+			},
+		},
+
+		"flushHandler": {
+			setupIngester: func(cfg *Config) {
+				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
+			},
+
+			action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) {
+				pushSingleSampleWithMetadataV2(t, i)
+
+				// Nothing shipped yet.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 0
+				`), "cortex_ingester_shipper_uploads_total"))
+
+				// Using wait=true makes this a synchronous call.
+				i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/flush?wait=true", nil))
+
+				verifyCompactedHead(t, i, true)
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 1
+				`), "cortex_ingester_shipper_uploads_total"))
+			},
+		},
+
+		"flushHandlerWithListOfTenants": {
+			setupIngester: func(cfg *Config) {
+				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
+			},
+
+			action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) {
+				pushSingleSampleWithMetadataV2(t, i)
+
+				// Nothing shipped yet.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 0
+				`), "cortex_ingester_shipper_uploads_total"))
+
+				users := url.Values{}
+				users.Add(tenantParam, "unknown-user")
+				users.Add(tenantParam, "another-unknown-user")
+
+				// Using wait=true makes this a synchronous call.
+				i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/flush?wait=true&"+users.Encode(), nil))
+
+				// Still nothing shipped or compacted.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 0
+				`), "cortex_ingester_shipper_uploads_total"))
+				verifyCompactedHead(t, i, false)
+
+				users = url.Values{}
+				users.Add(tenantParam, "different-user")
+				users.Add(tenantParam, userID) // Our user
+				users.Add(tenantParam, "yet-another-user")
+
+				i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/flush?wait=true&"+users.Encode(), nil))
+
+				verifyCompactedHead(t, i, true)
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 1
+				`), "cortex_ingester_shipper_uploads_total"))
+			},
+		},
+
+		"flushMultipleBlocksWithDataSpanning3Days": {
+			setupIngester: func(cfg *Config) {
+				cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false
+			},
+
+			action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) {
+				// Pushing 5 samples, spanning over 3 days.
+				// First block
+				pushSingleSampleAtTimeV2(t, i, 23*time.Hour.Milliseconds())
+				pushSingleSampleAtTimeV2(t, i, 24*time.Hour.Milliseconds()-1)
+
+				// Second block
+				pushSingleSampleAtTimeV2(t, i, 24*time.Hour.Milliseconds()+1)
+				pushSingleSampleAtTimeV2(t, i, 25*time.Hour.Milliseconds())
+
+				// Third block, far in the future.
+				pushSingleSampleAtTimeV2(t, i, 50*time.Hour.Milliseconds())
+
+				// Nothing shipped yet.
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 0
+				`), "cortex_ingester_shipper_uploads_total"))
+
+				i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/flush?wait=true", nil))
+
+				verifyCompactedHead(t, i, true)
+
+				require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+					# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+					# TYPE cortex_ingester_shipper_uploads_total counter
+					cortex_ingester_shipper_uploads_total 3
+				`), "cortex_ingester_shipper_uploads_total"))
+
+				userDB := i.getTSDB(userID)
+				require.NotNil(t, userDB)
+
+				blocks := userDB.Blocks()
+				require.Equal(t, 3, len(blocks))
+				require.Equal(t, 23*time.Hour.Milliseconds(), blocks[0].Meta().MinTime)
+				require.Equal(t, 24*time.Hour.Milliseconds(), blocks[0].Meta().MaxTime) // Block maxt is exclusive.
+
+				require.Equal(t, 24*time.Hour.Milliseconds()+1, blocks[1].Meta().MinTime)
+				require.Equal(t, 26*time.Hour.Milliseconds(), blocks[1].Meta().MaxTime)
+
+				require.Equal(t, 50*time.Hour.Milliseconds()+1, blocks[2].Meta().MaxTime) // Block maxt is exclusive.
+			},
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			cfg := defaultIngesterTestConfig(t)
+			cfg.LifecyclerConfig.JoinAfter = 0
+			cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
+			cfg.BlocksStorageConfig.TSDB.ShipInterval = 1 * time.Minute // Long enough to not be reached during the test.
+
+			if tc.setupIngester != nil {
+				tc.setupIngester(&cfg)
+			}
+
+			// Create ingester
+			reg := prometheus.NewPedanticRegistry()
+			i, err := prepareIngesterWithBlocksStorage(t, cfg, reg)
+			require.NoError(t, err)
+
+			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+			t.Cleanup(func() {
+				_ = services.StopAndAwaitTerminated(context.Background(), i)
+			})
+
+			// Wait until it's ACTIVE
+			test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+				return i.lifecycler.GetState()
+			})
+
+			// Run the test-case action against the started ingester.
+			tc.action(t, i, reg)
+		})
+	}
+}
+
+// TestIngesterPRW2_ForFlush pushes one sample via the v2 path, stops the
+// ingester without shipping, then restarts it in "for flusher" mode (reusing
+// the same data dir) and verifies the WAL-recovered sample is compacted and
+// shipped by Flush.
+func TestIngesterPRW2_ForFlush(t *testing.T) {
+	cfg := defaultIngesterTestConfig(t)
+	cfg.LifecyclerConfig.JoinAfter = 0
+	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
+	cfg.BlocksStorageConfig.TSDB.ShipInterval = 10 * time.Minute // Long enough to not be reached during the test.
+
+	// Create ingester
+	reg := prometheus.NewPedanticRegistry()
+	i, err := prepareIngesterWithBlocksStorage(t, cfg, reg)
+	require.NoError(t, err)
+
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	t.Cleanup(func() {
+		_ = services.StopAndAwaitTerminated(context.Background(), i)
+	})
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push some data.
+	pushSingleSampleWithMetadataV2(t, i)
+
+	// Stop ingester.
+	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
+
+	// Nothing shipped yet.
+	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+		# TYPE cortex_ingester_shipper_uploads_total counter
+		cortex_ingester_shipper_uploads_total 0
+	`), "cortex_ingester_shipper_uploads_total"))
+
+	// Restart ingester in "For Flusher" mode. We reuse the same config (esp. same dir)
+	reg = prometheus.NewPedanticRegistry()
+	i, err = NewForFlusher(i.cfg, i.limits, reg, log.NewNopLogger())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+
+	// Our single sample should be reloaded from WAL
+	verifyCompactedHead(t, i, false)
+	i.Flush()
+
+	// Head should be empty after flushing.
+	verifyCompactedHead(t, i, true)
+
+	// Verify that block has been shipped.
+	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_ingester_shipper_uploads_total Total number of uploaded TSDB blocks
+		# TYPE cortex_ingester_shipper_uploads_total counter
+		cortex_ingester_shipper_uploads_total 1
+	`), "cortex_ingester_shipper_uploads_total"))
+
+	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
+}
+
+// TestIngesterPRW2_UserStats pushes a few series via the v2 Push path, forces
+// the per-tenant ingestion-rate counters to tick, and verifies the reported
+// per-user stats (API/rule ingestion rate and in-memory series count).
+func TestIngesterPRW2_UserStats(t *testing.T) {
+	series := []struct {
+		lbls      labels.Labels
+		value     float64
+		timestamp int64
+	}{
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
+		{labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000},
+	}
+
+	// Create ingester
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push series
+	ctx := user.InjectOrgID(context.Background(), "test")
+
+	// Use a distinct loop variable name so it doesn't shadow the series slice.
+	for _, s := range series {
+		req, _ := mockWriteRequestV2(t, s.lbls, s.value, s.timestamp)
+		_, err := i.PushV2(ctx, req)
+		require.NoError(t, err)
+	}
+
+	// force update statistics
+	for _, db := range i.TSDBState.dbs {
+		db.ingestedAPISamples.Tick()
+		db.ingestedRuleSamples.Tick()
+	}
+
+	// Get label names
+	res, err := i.UserStats(ctx, &client.UserStatsRequest{})
+	require.NoError(t, err)
+	assert.InDelta(t, 0.2, res.ApiIngestionRate, 0.0001)
+	assert.InDelta(t, float64(0), res.RuleIngestionRate, 0.0001)
+	assert.Equal(t, uint64(3), res.NumSeries)
+}
+
// TestIngesterPRW2_AllUserStats pushes remote-write v2 series for two tenants
// and verifies that AllUserStats returns per-tenant ingestion rates, series
// counts and active-series counts for both of them.
func TestIngesterPRW2_AllUserStats(t *testing.T) {
	// user-1 gets 3 distinct series (3 samples), user-2 gets 2.
	series := []struct {
		user      string
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000},
	}

	// Create ingester
	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	for _, series := range series {
		ctx := user.InjectOrgID(context.Background(), series.user)
		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
		_, err := i.PushV2(ctx, req)
		require.NoError(t, err)
	}

	// force update statistics so the ingestion rates below are non-zero
	for _, db := range i.TSDBState.dbs {
		db.ingestedAPISamples.Tick()
		db.ingestedRuleSamples.Tick()
	}

	// Fetch stats across all tenants.
	res, err := i.AllUserStats(context.Background(), &client.UserStatsRequest{})
	require.NoError(t, err)

	expect := []*client.UserIDStatsResponse{
		{
			UserId: "user-1",
			Data: &client.UserStatsResponse{
				IngestionRate:     0.2,
				NumSeries:         3,
				ApiIngestionRate:  0.2,
				RuleIngestionRate: 0,
				ActiveSeries:      3,
				LoadedBlocks:      0,
			},
		},
		{
			UserId: "user-2",
			Data: &client.UserStatsResponse{
				IngestionRate:     0.13333333333333333,
				NumSeries:         2,
				ApiIngestionRate:  0.13333333333333333,
				RuleIngestionRate: 0,
				ActiveSeries:      2,
				LoadedBlocks:      0,
			},
		},
	}
	// Tenant ordering is not guaranteed, so compare as a set.
	assert.ElementsMatch(t, expect, res.Stats)
}
+
// TestIngesterPRW2_AllUserStatsHandler pushes remote-write v2 series for two
// tenants, forces a head compaction, and verifies the JSON served by the
// AllUserStatsHandler HTTP endpoint (including the LoadedBlocks count that the
// compaction produces).
func TestIngesterPRW2_AllUserStatsHandler(t *testing.T) {
	series := []struct {
		user      string
		lbls      labels.Labels
		value     float64
		timestamp int64
	}{
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "status", Value: "500"}, {Name: "route", Value: "get_user"}}, 1, 110000},
		{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000},
		{"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000},
	}

	// Create ingester
	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})
	for _, series := range series {
		ctx := user.InjectOrgID(context.Background(), series.user)
		req, _ := mockWriteRequestV2(t, series.lbls, series.value, series.timestamp)
		_, err := i.PushV2(ctx, req)
		require.NoError(t, err)
	}

	// Force compaction to test loaded blocks
	compactionCallbackCh := make(chan struct{})
	i.TSDBState.forceCompactTrigger <- requestWithUsersAndCallback{users: nil, callback: compactionCallbackCh}
	<-compactionCallbackCh

	// force update statistics so the ingestion rates below are non-zero
	for _, db := range i.TSDBState.dbs {
		db.ingestedAPISamples.Tick()
		db.ingestedRuleSamples.Tick()
	}

	// Call the HTTP handler and decode its JSON response.
	response := httptest.NewRecorder()
	request := httptest.NewRequest("GET", "/all_user_stats", nil)
	request.Header.Add("Accept", "application/json")
	i.AllUserStatsHandler(response, request)
	var resp UserStatsByTimeseries
	err = json.Unmarshal(response.Body.Bytes(), &resp)
	require.NoError(t, err)

	// NumSeries is expected to be 0 here, while ActiveSeries and LoadedBlocks
	// reflect the forced compaction above.
	expect := UserStatsByTimeseries{
		{
			UserID: "user-1",
			UserStats: UserStats{
				IngestionRate:     0.2,
				NumSeries:         0,
				APIIngestionRate:  0.2,
				RuleIngestionRate: 0,
				ActiveSeries:      3,
				LoadedBlocks:      1,
			},
		},
		{
			UserID: "user-2",
			UserStats: UserStats{
				IngestionRate:     0.13333333333333333,
				NumSeries:         0,
				APIIngestionRate:  0.13333333333333333,
				RuleIngestionRate: 0,
				ActiveSeries:      2,
				LoadedBlocks:      1,
			},
		},
	}
	assert.ElementsMatch(t, expect, resp)
}
+
// TestIngesterPRW2_CompactIdleBlock verifies the idle-head compaction path for
// remote-write v2 data: a freshly pushed head is not compacted, becomes
// compacted once the idle timeout elapses, and accepts new pushes afterwards.
// Series created/removed counters are asserted at each stage.
func TestIngesterPRW2_CompactIdleBlock(t *testing.T) {
	cfg := defaultIngesterTestConfig(t)
	cfg.LifecyclerConfig.JoinAfter = 0
	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
	cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = 1 * time.Hour      // Long enough to not be reached during the test.
	cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout = 1 * time.Second // Testing this.

	r := prometheus.NewRegistry()

	// Create ingester
	i, err := prepareIngesterWithBlocksStorage(t, cfg, r)
	require.NoError(t, err)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	pushSingleSampleWithMetadataV2(t, i)

	// Head is not idle yet, so this compaction run must be a no-op.
	i.compactBlocks(context.Background(), false, nil)
	verifyCompactedHead(t, i, false)
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1

		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 0

		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 1
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_memory_users"))

	// wait one second (plus maximum jitter) -- TSDB is now idle.
	time.Sleep(time.Duration(float64(cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout) * (1 + compactionIdleTimeoutJitter)))

	// Now the idle head should be compacted, removing its series from memory.
	i.compactBlocks(context.Background(), false, nil)
	verifyCompactedHead(t, i, true)
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1

		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 1

		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 1
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_memory_users"))

	// Pushing another sample still works.
	pushSingleSampleWithMetadataV2(t, i)
	verifyCompactedHead(t, i, false)

	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 2

		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 1

		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 1
    `), memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_memory_users"))
}
+
// TestIngesterPRW2_CompactAndCloseIdleTSDB verifies that an idle per-tenant
// TSDB is compacted, shipped and then fully closed/removed (all its metrics
// disappearing), and that a subsequent remote-write v2 push transparently
// recreates the TSDB for that tenant.
func TestIngesterPRW2_CompactAndCloseIdleTSDB(t *testing.T) {
	cfg := defaultIngesterTestConfig(t)
	cfg.LifecyclerConfig.JoinAfter = 0
	cfg.BlocksStorageConfig.TSDB.ShipInterval = 1 * time.Second // Required to enable shipping.
	cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 1
	cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = 1 * time.Second
	cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout = 1 * time.Second
	cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout = 1 * time.Second
	cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBInterval = 100 * time.Millisecond

	r := prometheus.NewRegistry()

	// Create ingester
	i, err := prepareIngesterWithBlocksStorage(t, cfg, r)
	require.NoError(t, err)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
	})

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	pushSingleSampleWithMetadataV2(t, i)
	i.updateActiveSeries(context.Background())

	require.Equal(t, int64(1), i.TSDBState.seriesCount.Load())

	// Per-user metrics are expected to vanish once the TSDB is closed;
	// global metrics remain but drop back to zero.
	userMetrics := []string{memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_active_series"}

	globalMetrics := []string{"cortex_ingester_memory_users", "cortex_ingester_memory_metadata"}
	metricsToCheck := append(userMetrics, globalMetrics...)

	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1

		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 0

		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 1

		# HELP cortex_ingester_active_series Number of currently active series per user.
		# TYPE cortex_ingester_active_series gauge
		cortex_ingester_active_series{user="1"} 1

		# HELP cortex_ingester_memory_metadata The current number of metadata in memory.
		# TYPE cortex_ingester_memory_metadata gauge
		cortex_ingester_memory_metadata 1

		# HELP cortex_ingester_memory_metadata_created_total The total number of metadata that were created per user
		# TYPE cortex_ingester_memory_metadata_created_total counter
		cortex_ingester_memory_metadata_created_total{user="1"} 1
    `), metricsToCheck...))

	// Wait until TSDB has been closed and removed.
	test.Poll(t, 10*time.Second, 0, func() interface{} {
		i.stoppedMtx.Lock()
		defer i.stoppedMtx.Unlock()
		return len(i.TSDBState.dbs)
	})

	require.Greater(t, testutil.ToFloat64(i.TSDBState.idleTsdbChecks.WithLabelValues(string(tsdbIdleClosed))), float64(0))
	i.updateActiveSeries(context.Background())
	require.Equal(t, int64(0), i.TSDBState.seriesCount.Load()) // Flushing removed all series from memory.

	// Verify that user has disappeared from metrics.
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(""), userMetrics...))

	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 0

		# HELP cortex_ingester_memory_metadata The current number of metadata in memory.
		# TYPE cortex_ingester_memory_metadata gauge
		cortex_ingester_memory_metadata 0
    `), "cortex_ingester_memory_users", "cortex_ingester_memory_metadata"))

	// Pushing another sample will recreate TSDB.
	pushSingleSampleWithMetadataV2(t, i)
	i.updateActiveSeries(context.Background())

	// User is back.
	require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(`
		# HELP cortex_ingester_memory_series_created_total The total number of series that were created per user.
		# TYPE cortex_ingester_memory_series_created_total counter
		cortex_ingester_memory_series_created_total{user="1"} 1

		# HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user.
		# TYPE cortex_ingester_memory_series_removed_total counter
		cortex_ingester_memory_series_removed_total{user="1"} 0

		# HELP cortex_ingester_memory_users The current number of users in memory.
		# TYPE cortex_ingester_memory_users gauge
		cortex_ingester_memory_users 1

		# HELP cortex_ingester_active_series Number of currently active series per user.
		# TYPE cortex_ingester_active_series gauge
		cortex_ingester_active_series{user="1"} 1

		# HELP cortex_ingester_memory_metadata The current number of metadata in memory.
		# TYPE cortex_ingester_memory_metadata gauge
		cortex_ingester_memory_metadata 1

		# HELP cortex_ingester_memory_metadata_created_total The total number of metadata that were created per user
		# TYPE cortex_ingester_memory_metadata_created_total counter
		cortex_ingester_memory_metadata_created_total{user="1"} 1
    `), metricsToCheck...))
}
+
// TestIngesterPRW2_CloseTSDBsOnShutdown verifies that a per-tenant TSDB that
// received remote-write v2 data is closed and removed from memory when the
// ingester service is stopped.
func TestIngesterPRW2_CloseTSDBsOnShutdown(t *testing.T) {
	cfg := defaultIngesterTestConfig(t)
	cfg.LifecyclerConfig.JoinAfter = 0

	// Create ingester
	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
	require.NoError(t, err)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		// Cleanup stop is best-effort; the test already stops the service below.
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push some data.
	pushSingleSampleWithMetadataV2(t, i)

	db := i.getTSDB(userID)
	require.NotNil(t, db)

	// Stop ingester.
	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))

	// Verify that DB is no longer in memory, but was closed
	db = i.getTSDB(userID)
	require.Nil(t, db)
}
+
// TestIngesterPRW2_NotDeleteUnshippedBlocks verifies that block retention only
// deletes blocks already recorded as shipped in the shipper meta file: with a
// very short retention, unshipped blocks survive compaction-triggered reloads
// while shipped ones are removed. It also checks the
// cortex_ingester_oldest_unshipped_block_timestamp_seconds gauge at each step.
func TestIngesterPRW2_NotDeleteUnshippedBlocks(t *testing.T) {
	chunkRange := 2 * time.Hour
	chunkRangeMilliSec := chunkRange.Milliseconds()
	cfg := defaultIngesterTestConfig(t)
	cfg.BlocksStorageConfig.TSDB.BlockRanges = []time.Duration{chunkRange}
	cfg.BlocksStorageConfig.TSDB.Retention = time.Millisecond // Which means delete all but first block.
	cfg.LifecyclerConfig.JoinAfter = 0

	// Create ingester
	reg := prometheus.NewPedanticRegistry()
	i, err := prepareIngesterWithBlocksStorage(t, cfg, reg)
	require.NoError(t, err)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// No blocks yet, so the oldest-unshipped gauge must be 0.
	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
		# HELP cortex_ingester_oldest_unshipped_block_timestamp_seconds Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.
		# TYPE cortex_ingester_oldest_unshipped_block_timestamp_seconds gauge
		cortex_ingester_oldest_unshipped_block_timestamp_seconds 0
	`), "cortex_ingester_oldest_unshipped_block_timestamp_seconds"))

	// Push some data to create 3 blocks.
	ctx := user.InjectOrgID(context.Background(), userID)
	for j := int64(0); j < 5; j++ {
		req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec)
		_, err := i.PushV2(ctx, req)
		require.NoError(t, err)
	}

	db := i.getTSDB(userID)
	require.NotNil(t, db)
	require.Nil(t, db.Compact(ctx))

	oldBlocks := db.Blocks()
	require.Equal(t, 3, len(oldBlocks))

	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
		# HELP cortex_ingester_oldest_unshipped_block_timestamp_seconds Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.
		# TYPE cortex_ingester_oldest_unshipped_block_timestamp_seconds gauge
		cortex_ingester_oldest_unshipped_block_timestamp_seconds %d
	`, oldBlocks[0].Meta().ULID.Time()/1000)), "cortex_ingester_oldest_unshipped_block_timestamp_seconds"))

	// Saying that we have shipped the second block, so only that should get deleted.
	require.Nil(t, shipper.WriteMetaFile(nil, db.shipperMetadataFilePath, &shipper.Meta{
		Version:  shipper.MetaVersion1,
		Uploaded: []ulid.ULID{oldBlocks[1].Meta().ULID},
	}))
	require.NoError(t, db.updateCachedShippedBlocks())

	// Add more samples that could trigger another compaction and hence reload of blocks.
	for j := int64(5); j < 6; j++ {
		req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec)
		_, err := i.PushV2(ctx, req)
		require.NoError(t, err)
	}
	require.Nil(t, db.Compact(ctx))

	// Only the second block should be gone along with a new block.
	newBlocks := db.Blocks()
	require.Equal(t, 3, len(newBlocks))
	require.Equal(t, oldBlocks[0].Meta().ULID, newBlocks[0].Meta().ULID)    // First block remains same.
	require.Equal(t, oldBlocks[2].Meta().ULID, newBlocks[1].Meta().ULID)    // 3rd block becomes 2nd now.
	require.NotEqual(t, oldBlocks[1].Meta().ULID, newBlocks[2].Meta().ULID) // The new block won't match previous 2nd block.

	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
		# HELP cortex_ingester_oldest_unshipped_block_timestamp_seconds Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.
		# TYPE cortex_ingester_oldest_unshipped_block_timestamp_seconds gauge
		cortex_ingester_oldest_unshipped_block_timestamp_seconds %d
	`, newBlocks[0].Meta().ULID.Time()/1000)), "cortex_ingester_oldest_unshipped_block_timestamp_seconds"))

	// Shipping 2 more blocks, hence all the blocks from first round.
	require.Nil(t, shipper.WriteMetaFile(nil, db.shipperMetadataFilePath, &shipper.Meta{
		Version:  shipper.MetaVersion1,
		Uploaded: []ulid.ULID{oldBlocks[1].Meta().ULID, newBlocks[0].Meta().ULID, newBlocks[1].Meta().ULID},
	}))
	require.NoError(t, db.updateCachedShippedBlocks())

	// Add more samples that could trigger another compaction and hence reload of blocks.
	for j := int64(6); j < 7; j++ {
		req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec)
		_, err := i.PushV2(ctx, req)
		require.NoError(t, err)
	}
	require.Nil(t, db.Compact(ctx))

	// All blocks from the old blocks should be gone now.
	newBlocks2 := db.Blocks()
	require.Equal(t, 2, len(newBlocks2))

	require.Equal(t, newBlocks[2].Meta().ULID, newBlocks2[0].Meta().ULID) // Block created in last round.
	for _, b := range oldBlocks {
		// Second block is not one among old blocks.
		require.NotEqual(t, b.Meta().ULID, newBlocks2[1].Meta().ULID)
	}

	require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
		# HELP cortex_ingester_oldest_unshipped_block_timestamp_seconds Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.
		# TYPE cortex_ingester_oldest_unshipped_block_timestamp_seconds gauge
		cortex_ingester_oldest_unshipped_block_timestamp_seconds %d
	`, newBlocks2[0].Meta().ULID.Time()/1000)), "cortex_ingester_oldest_unshipped_block_timestamp_seconds"))
}
+
+func TestIngesterPRW2_PushErrorDuringForcedCompaction(t *testing.T) {
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	t.Cleanup(func() {
+		_ = services.StopAndAwaitTerminated(context.Background(), i)
+	})
+
+	// Wait until it's ACTIVE
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push a sample, it should succeed.
+	pushSingleSampleWithMetadataV2(t, i)
+
+	// We mock a flushing by setting the boolean.
+	db := i.getTSDB(userID)
+	require.NotNil(t, db)
+	require.True(t, db.casState(active, forceCompacting))
+
+	// Ingestion should fail with a 503.
+	req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now()))
+	ctx := user.InjectOrgID(context.Background(), userID)
+	_, err = i.PushV2(ctx, req)
+	require.Equal(t, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(errors.New("forced compaction in progress"), userID).Error()), err)
+
+	// Ingestion is successful after a flush.
+	require.True(t, db.casState(forceCompacting, active))
+	pushSingleSampleWithMetadata(t, i)
+}
+
// TestIngesterPRW2_NoFlushWithInFlightRequest verifies that a flush-triggered
// head compaction is deferred while a push (append) is in flight, and only
// proceeds after the append lock is released.
func TestIngesterPRW2_NoFlushWithInFlightRequest(t *testing.T) {
	registry := prometheus.NewRegistry()
	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), registry)
	require.NoError(t, err)

	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	t.Cleanup(func() {
		_ = services.StopAndAwaitTerminated(context.Background(), i)
	})

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Push few samples.
	for j := 0; j < 5; j++ {
		pushSingleSampleWithMetadataV2(t, i)
	}

	// Verifying that compaction won't happen when a request is in flight.

	// This mocks a request in flight.
	db := i.getTSDB(userID)
	require.NoError(t, db.acquireAppendLock())

	// Flush handler only triggers compactions, but doesn't wait for them to finish. We cannot use ?wait=true here,
	// because it would deadlock -- flush will wait for appendLock to be released.
	i.FlushHandler(httptest.NewRecorder(), httptest.NewRequest("POST", "/flush", nil))

	// Flushing should not have succeeded even after 5 seconds.
	time.Sleep(5 * time.Second)
	require.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(`
		# HELP cortex_ingester_tsdb_compactions_total Total number of TSDB compactions that were executed.
		# TYPE cortex_ingester_tsdb_compactions_total counter
		cortex_ingester_tsdb_compactions_total 0
	`), "cortex_ingester_tsdb_compactions_total"))

	// No requests in flight after this.
	db.releaseAppendLock()

	// Let's wait until all head series have been flushed.
	test.Poll(t, 5*time.Second, uint64(0), func() interface{} {
		db := i.getTSDB(userID)
		if db == nil {
			// NOTE(review): false never equals uint64(0), so a nil db makes the
			// poll keep retrying until it times out -- presumably intentional.
			return false
		}
		return db.Head().NumSeries()
	})

	require.NoError(t, testutil.GatherAndCompare(registry, strings.NewReader(`
		# HELP cortex_ingester_tsdb_compactions_total Total number of TSDB compactions that were executed.
		# TYPE cortex_ingester_tsdb_compactions_total counter
		cortex_ingester_tsdb_compactions_total 1
	`), "cortex_ingester_tsdb_compactions_total"))
}
+
// TestIngesterPRW2_PushInstanceLimits exercises ingester-wide instance limits
// (max in-memory series, max tenants, max ingestion rate) against remote-write
// v2 pushes. For each case, every push except the last must succeed; the last
// push is checked against the expected error (or error type), if any.
func TestIngesterPRW2_PushInstanceLimits(t *testing.T) {
	tests := map[string]struct {
		limits          InstanceLimits
		reqs            map[string][]*cortexpbv2.WriteRequest // requests per tenant ID
		expectedErr     error                                 // exact error expected on the final push
		expectedErrType interface{}                           // alternatively, error type matched via errors.As
	}{
		"should succeed creating one user and series": {
			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
			reqs: map[string][]*cortexpbv2.WriteRequest{
				"test": {
					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}})},
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
						nil,
						[]cortexpbv2.Metadata{
							{Type: cortexpbv2.METRIC_TYPE_COUNTER, HelpRef: 3},
						},
						cortexpbv2.API,
						"a help for metric_name_1"),
				},
			},
			expectedErr: nil,
		},

		"should fail creating two series": {
			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
			reqs: map[string][]*cortexpbv2.WriteRequest{
				"test": {
					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
						nil,
						nil,
						cortexpbv2.API),

					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
						nil,
						nil,
						cortexpbv2.API),
				},
			},

			expectedErr: wrapWithUser(errMaxSeriesLimitReached, "test"),
		},

		"should fail creating two users": {
			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
			reqs: map[string][]*cortexpbv2.WriteRequest{
				"user1": {
					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
						nil,
						nil,
						cortexpbv2.API),
				},

				"user2": {
					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
						nil,
						nil,
						cortexpbv2.API),
				},
			},
			expectedErr: wrapWithUser(errMaxUsersLimitReached, "user2"),
		},

		"should fail pushing samples in two requests due to rate limit": {
			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1, MaxIngestionRate: 0.001},
			reqs: map[string][]*cortexpbv2.WriteRequest{
				"user1": {
					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
						nil,
						nil,
						cortexpbv2.API),

					cortexpbv2.ToWriteRequestV2(
						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
						nil,
						nil,
						cortexpbv2.API),
				},
			},
			expectedErr: errMaxSamplesPushRateLimitReached,
		},
	}

	// NOTE(review): mutates package-level state without restoring it; later
	// tests in this package see defaultInstanceLimits == nil.
	defaultInstanceLimits = nil

	for testName, testData := range tests {
		t.Run(testName, func(t *testing.T) {
			// Create a mocked ingester
			cfg := defaultIngesterTestConfig(t)
			cfg.LifecyclerConfig.JoinAfter = 0
			cfg.InstanceLimitsFn = func() *InstanceLimits {
				return &testData.limits
			}

			i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
			require.NoError(t, err)
			require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
			defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

			// Wait until the ingester is ACTIVE
			test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
				return i.lifecycler.GetState()
			})

			// Iterate through users in sorted order (by username).
			uids := []string{}
			totalPushes := 0
			for uid, requests := range testData.reqs {
				uids = append(uids, uid)
				totalPushes += len(requests)
			}
			sort.Strings(uids)

			pushIdx := 0
			for _, uid := range uids {
				ctx := user.InjectOrgID(context.Background(), uid)

				for _, req := range testData.reqs[uid] {
					pushIdx++
					_, err := i.PushV2(ctx, req)

					if pushIdx < totalPushes {
						require.NoError(t, err)
					} else {
						// Last push may expect error.
						if testData.expectedErr != nil {
							assert.Equal(t, testData.expectedErr, err)
						} else if testData.expectedErrType != nil {
							assert.True(t, errors.As(err, testData.expectedErrType), "expected error type %T, got %v", testData.expectedErrType, err)
						} else {
							assert.NoError(t, err)
						}
					}

					// imitate time ticking between each push
					i.ingestionRate.Tick()

					rate := testutil.ToFloat64(i.metrics.ingestionRate)
					require.NotZero(t, rate)
				}
			}
		})
	}
}
+
// TestIngesterPRW2_inflightPushRequests verifies the MaxInflightPushRequests
// instance limit for remote-write v2: while one (very large, hence slow) push
// is in flight, a second concurrent push is rejected with
// errTooManyInflightPushRequests.
func TestIngesterPRW2_inflightPushRequests(t *testing.T) {
	limits := InstanceLimits{MaxInflightPushRequests: 1}

	// Create a mocked ingester
	cfg := defaultIngesterTestConfig(t)
	cfg.InstanceLimitsFn = func() *InstanceLimits { return &limits }
	cfg.LifecyclerConfig.JoinAfter = 0

	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	// Wait until the ingester is ACTIVE
	test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	ctx := user.InjectOrgID(context.Background(), "test")

	// Closed by the first goroutine right before it starts its push, so the
	// second goroutine knows when to attempt the competing push.
	startCh := make(chan struct{})

	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error {
		// Large request so the push takes long enough to overlap with the
		// second goroutine's attempt.
		count := 3500000
		req := generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, fmt.Sprintf("real-%d", count)), count)
		// Signal that we're going to do the real push now.
		close(startCh)

		_, err := i.PushV2(ctx, req)
		return err
	})

	g.Go(func() error {
		select {
		case <-ctx.Done():
		// failed to setup
		case <-startCh:
			// we can start the test.
		}

		time.Sleep(10 * time.Millisecond) // Give first goroutine a chance to start pushing...
		req := generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "testcase"), 1024)

		_, err := i.PushV2(ctx, req)
		require.Equal(t, errTooManyInflightPushRequests, err)
		return nil
	})

	require.NoError(t, g.Wait())
}
+
// TestIngesterPRW2_QueryExemplar_MaxInflightQueryRequest verifies that
// QueryExemplars is rejected with errTooManyInflightQueryRequests once the
// inflight-query counter already sits at the configured limit, while pushes
// are still accepted.
func TestIngesterPRW2_QueryExemplar_MaxInflightQueryRequest(t *testing.T) {
	cfg := defaultIngesterTestConfig(t)
	cfg.DefaultLimits.MaxInflightQueryRequests = 1
	i, err := prepareIngesterWithBlocksStorage(t, cfg, prometheus.NewRegistry())
	require.NoError(t, err)
	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
	defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck

	// Wait until it's ACTIVE
	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
		return i.lifecycler.GetState()
	})

	// Simulate one query already in flight, saturating the limit of 1.
	i.inflightQueryRequests.Add(1)

	// Mock request
	ctx := user.InjectOrgID(context.Background(), "test")

	// Pushes are unaffected by the query limit.
	wreq, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}, {Name: "route", Value: "get_user"}}, 1, 100000)
	_, err = i.PushV2(ctx, wreq)
	require.NoError(t, err)

	rreq := &client.ExemplarQueryRequest{}
	_, err = i.QueryExemplars(ctx, rreq)
	require.Error(t, err)
	require.Equal(t, err, errTooManyInflightQueryRequests)
}
+
+// generateSamplesForLabelV2 builds a v2 WriteRequest holding count samples
+// (sample i has value float64(i) and timestamp int64(i)) that all share the
+// same label set lbs.
+func generateSamplesForLabelV2(lbs labels.Labels, count int) *cortexpbv2.WriteRequest {
+	var lbls = make([]labels.Labels, 0, count)
+	var samples = make([]cortexpbv2.Sample, 0, count)
+
+	for i := 0; i < count; i++ {
+		samples = append(samples, cortexpbv2.Sample{
+			Value:     float64(i),
+			Timestamp: int64(i),
+		})
+		lbls = append(lbls, lbs)
+	}
+
+	return cortexpbv2.ToWriteRequestV2(lbls, samples, nil, nil, cortexpbv2.API)
+}
+
+// mockWriteRequestWithMetadataV2 builds a v2 WriteRequest with a single
+// sample for lbls plus the given metadata (additionalSymbols are appended to
+// the request's symbol table), together with the QueryStreamResponse
+// (XOR-chunk encoded) expected when the series is streamed back.
+func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64, metadata cortexpbv2.Metadata, additionalSymbols ...string) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
+	samples := []cortexpbv2.Sample{
+		{
+			Timestamp: timestamp,
+			Value:     value,
+		},
+	}
+
+	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, []cortexpbv2.Metadata{metadata}, cortexpbv2.API, additionalSymbols...)
+
+	// Build the expected chunk by appending the same sample directly.
+	chunk := chunkenc.NewXORChunk()
+	app, err := chunk.Appender()
+	require.NoError(t, err)
+	app.Append(timestamp, value)
+	chunk.Compact()
+
+	expectedQueryStreamResChunks := &client.QueryStreamResponse{
+		Chunkseries: []client.TimeSeriesChunk{
+			{
+				Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+				Chunks: []client.Chunk{
+					{
+						StartTimestampMs: timestamp,
+						EndTimestampMs:   timestamp,
+						Encoding:         int32(encoding.PrometheusXorChunk),
+						Data:             chunk.Bytes(),
+					},
+				},
+			},
+		},
+	}
+
+	return req, expectedQueryStreamResChunks
+}
+
+// mockHistogramWriteRequestV2 builds a v2 WriteRequest carrying one native
+// histogram sample for lbls — a float histogram when float is true, an
+// integer histogram otherwise — together with the QueryStreamResponse
+// expected when that series is streamed back as chunks.
+func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
+	var (
+		histograms []cortexpbv2.Histogram
+		h          *histogram.Histogram
+		fh         *histogram.FloatHistogram
+		c          chunkenc.Chunk
+	)
+	if float {
+		fh = tsdbutil.GenerateTestFloatHistogram(value)
+		histograms = []cortexpbv2.Histogram{
+			cortexpbv2.FloatHistogramToHistogramProto(timestampMs, fh),
+		}
+		c = chunkenc.NewFloatHistogramChunk()
+	} else {
+		h = tsdbutil.GenerateTestHistogram(value)
+		histograms = []cortexpbv2.Histogram{
+			cortexpbv2.HistogramToHistogramProto(timestampMs, h),
+		}
+		c = chunkenc.NewHistogramChunk()
+	}
+
+	// Build the expected chunk by appending the same histogram directly.
+	app, err := c.Appender()
+	require.NoError(t, err)
+	if float {
+		_, _, _, err = app.AppendFloatHistogram(nil, timestampMs, fh, true)
+	} else {
+		_, _, _, err = app.AppendHistogram(nil, timestampMs, h, true)
+	}
+	require.NoError(t, err)
+	c.Compact()
+
+	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, nil, histograms, nil, cortexpbv2.API)
+	// The expected chunk encoding mirrors the histogram flavour pushed above.
+	enc := int32(encoding.PrometheusHistogramChunk)
+	if float {
+		enc = int32(encoding.PrometheusFloatHistogramChunk)
+	}
+	expectedQueryStreamResChunks := &client.QueryStreamResponse{
+		Chunkseries: []client.TimeSeriesChunk{
+			{
+				Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+				Chunks: []client.Chunk{
+					{
+						StartTimestampMs: timestampMs,
+						EndTimestampMs:   timestampMs,
+						Encoding:         enc,
+						Data:             c.Bytes(),
+					},
+				},
+			},
+		},
+	}
+
+	return req, expectedQueryStreamResChunks
+}
+
+// mockWriteRequestV2 builds a v2 WriteRequest with a single sample for lbls
+// together with the QueryStreamResponse (XOR-chunk encoded) expected when the
+// series is streamed back.
+func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
+	samples := []cortexpbv2.Sample{
+		{
+			Timestamp: timestamp,
+			Value:     value,
+		},
+	}
+
+	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, nil, cortexpbv2.API)
+
+	// Build the expected chunk by appending the same sample directly.
+	chunk := chunkenc.NewXORChunk()
+	app, err := chunk.Appender()
+	require.NoError(t, err)
+	app.Append(timestamp, value)
+	chunk.Compact()
+
+	expectedQueryStreamResChunks := &client.QueryStreamResponse{
+		Chunkseries: []client.TimeSeriesChunk{
+			{
+				Labels: cortexpb.FromLabelsToLabelAdapters(lbls),
+				Chunks: []client.Chunk{
+					{
+						StartTimestampMs: timestamp,
+						EndTimestampMs:   timestamp,
+						Encoding:         int32(encoding.PrometheusXorChunk),
+						Data:             chunk.Bytes(),
+					},
+				},
+			},
+		},
+	}
+
+	return req, expectedQueryStreamResChunks
+}
+
+// pushSingleSampleWithMetadataV2 pushes one zero-valued sample at "now" for
+// series {__name__="test"} with COUNTER metadata attached, and requires the
+// push to succeed.
+func pushSingleSampleWithMetadataV2(t *testing.T, i *Ingester) {
+	ctx := user.InjectOrgID(context.Background(), userID)
+	metadata := cortexpbv2.Metadata{
+		// HelpRef 3 is expected to resolve to the additional "a help for
+		// metric" symbol appended below; UnitRef 0 is the empty symbol.
+		// NOTE(review): confirm against ToWriteRequestV2's symbol layout.
+		Type:    cortexpbv2.METRIC_TYPE_COUNTER,
+		HelpRef: 3,
+		UnitRef: 0,
+	}
+
+	req, _ := mockWriteRequestWithMetadataV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now()), metadata, "a help for metric")
+	_, err := i.PushV2(ctx, req)
+	require.NoError(t, err)
+}
+
+// pushSingleSampleAtTimeV2 pushes a single zero-valued sample for series
+// {__name__="test"} at timestamp ts and requires the push to succeed.
+func pushSingleSampleAtTimeV2(t *testing.T, i *Ingester, ts int64) {
+	ctx := user.InjectOrgID(context.Background(), userID)
+	req, _ := mockWriteRequestV2(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, ts)
+	_, err := i.PushV2(ctx, req)
+	require.NoError(t, err)
+}
+
+// writeRequestSingleSeriesV2 builds a v2 WriteRequest holding one timeseries
+// with the given labels and samples, symbolizing the labels through a fresh
+// writev2 symbols table.
+func writeRequestSingleSeriesV2(lbls labels.Labels, samples []cortexpbv2.Sample) *cortexpbv2.WriteRequest {
+	req := &cortexpbv2.WriteRequest{
+		Source: cortexpbv2.API,
+	}
+
+	st := writev2.NewSymbolTable()
+	ts := cortexpbv2.TimeSeries{}
+	ts.Samples = samples
+	ts.LabelsRefs = st.SymbolizeLabels(lbls, nil)
+	req.Timeseries = append(req.Timeseries, cortexpbv2.PreallocTimeseriesV2{TimeSeries: &ts})
+	req.Symbols = st.Symbols()
+
+	return req
+}
+
+// createIngesterWithSeriesV2 creates an ingester and pushes numSeries series
+// with numSamplesPerSeries samples each via the v2 write path, batching at
+// most maxBatchSize series per PushV2 call. Samples start at startTimestamp
+// and advance by step.
+func createIngesterWithSeriesV2(t testing.TB, userID string, numSeries, numSamplesPerSeries int, startTimestamp, step int64) *Ingester {
+	const maxBatchSize = 1000
+
+	// Create ingester.
+	i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry())
+	require.NoError(t, err)
+	require.NoError(t, services.StartAndAwaitRunning(context.Background(), i))
+	t.Cleanup(func() {
+		require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i))
+	})
+
+	// Wait until it's ACTIVE.
+	test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} {
+		return i.lifecycler.GetState()
+	})
+
+	// Push fixtures.
+	ctx := user.InjectOrgID(context.Background(), userID)
+
+	for ts := startTimestamp; ts < startTimestamp+(step*int64(numSamplesPerSeries)); ts += step {
+		for o := 0; o < numSeries; o += maxBatchSize {
+			batchSize := min(maxBatchSize, numSeries-o)
+
+			// Generate metrics and samples (1 for each series).
+			metrics := make([]labels.Labels, 0, batchSize)
+			samples := make([]cortexpbv2.Sample, 0, batchSize)
+
+			for s := 0; s < batchSize; s++ {
+				metrics = append(metrics, labels.Labels{
+					{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", o+s)},
+				})
+
+				samples = append(samples, cortexpbv2.Sample{
+					Timestamp: ts,
+					Value:     1,
+				})
+			}
+
+			// Send metrics to the ingester.
+			req := cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)
+			_, err := i.PushV2(ctx, req)
+			require.NoError(t, err)
+		}
+	}
+
+	return i
+}
+
+// benchmarkDataV2 produces nSeries copies of chunk.BenchmarkLabels that differ
+// only in their "cpu" label value (cpu00, cpu01, ...), plus one sample per
+// series with timestamp 0 and value equal to the series index.
+func benchmarkDataV2(nSeries int) (allLabels []labels.Labels, allSamples []cortexpbv2.Sample) {
+	for j := 0; j < nSeries; j++ {
+		// NOTE: this local shadows the imported labels package inside the loop.
+		labels := chunk.BenchmarkLabels.Copy()
+		for i := range labels {
+			if labels[i].Name == "cpu" {
+				labels[i].Value = fmt.Sprintf("cpu%02d", j)
+			}
+		}
+		allLabels = append(allLabels, labels)
+		allSamples = append(allSamples, cortexpbv2.Sample{Timestamp: 0, Value: float64(j)})
+	}
+	return
+}
diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go
index be94cfa2f1..dda50dddb3 100644
--- a/pkg/util/validation/validate.go
+++ b/pkg/util/validation/validate.go
@@ -12,9 +12,12 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
+	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/extract"
 )
@@ -24,6 +27,7 @@ const (
 
 	errMetadataMissingMetricName = "metadata missing metric name"
 	errMetadataTooLong           = "metadata '%s' value too long: %.200q metric %.200q"
+	errMetadataV2TooLong         = "metadata '%s' value too long: %.200q"
 
 	typeMetricName = "METRIC_NAME"
 	typeHelp       = "HELP"
@@ -148,6 +152,48 @@ func ValidateSampleTimestamp(validateMetrics *ValidateMetrics, limits *Limits, u
 	return nil
 }
 
+// ValidateExemplarV2 returns an error if the exemplar is invalid.
+// The returned error may retain the provided series labels.
+// As a side effect it re-symbolizes the exemplar's label refs into the
+// request-scoped symbols table st.
+func ValidateExemplarV2(validateMetrics *ValidateMetrics, symbols []string, userID string, seriesLabels []cortexpb.LabelAdapter, e *cortexpbv2.Exemplar, b labels.ScratchBuilder, st *writev2.SymbolsTable) ValidationError {
+	lbs := e.ToLabels(&b, symbols)
+	// symbolize exemplar labels
+	e.LabelsRefs = st.SymbolizeLabels(lbs, nil)
+	exemplarLabels := cortexpb.FromLabelsToLabelAdapters(lbs)
+
+	// An exemplar must carry at least one label (len is never negative).
+	if len(exemplarLabels) == 0 {
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
+		return newExemplarEmtpyLabelsError(seriesLabels, []cortexpb.LabelAdapter{}, e.Timestamp)
+	}
+
+	// A zero timestamp means the exemplar did not carry one.
+	if e.Timestamp == 0 {
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
+		return newExemplarMissingTimestampError(
+			seriesLabels,
+			exemplarLabels,
+			e.Timestamp,
+		)
+	}
+
+	// Exemplar label length does not include chars involved in text
+	// rendering such as quotes, commas, etc.  See spec and const definition.
+	labelSetLen := 0
+	for _, l := range exemplarLabels {
+		labelSetLen += utf8.RuneCountInString(l.Name)
+		labelSetLen += utf8.RuneCountInString(l.Value)
+	}
+
+	if labelSetLen > ExemplarMaxLabelSetLength {
+		validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
+		return newExemplarLabelLengthError(
+			seriesLabels,
+			exemplarLabels,
+			e.Timestamp,
+		)
+	}
+
+	return nil
+}
+
 // ValidateExemplar returns an error if the exemplar is invalid.
 // The returned error may retain the provided series labels.
 func ValidateExemplar(validateMetrics *ValidateMetrics, userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
@@ -243,6 +289,37 @@ func ValidateLabels(validateMetrics *ValidateMetrics, limits *Limits, userID str
 	return nil
 }
 
+// ValidateMetadataV2 returns an err if a metric metadata is invalid.
+// As a side effect it re-symbolizes the metadata's help and unit strings
+// into the request-scoped symbols table st, updating HelpRef/UnitRef.
+func ValidateMetadataV2(validateMetrics *ValidateMetrics, cfg *Limits, userID string, symbols []string, metadata *cortexpbv2.Metadata, st *writev2.SymbolsTable) error {
+	// The refs come from an untrusted request; indexing without a bounds
+	// check would panic the process on a malformed payload.
+	if int(metadata.HelpRef) >= len(symbols) || int(metadata.UnitRef) >= len(symbols) {
+		return httpgrpc.Errorf(http.StatusBadRequest, "metadata symbol reference out of range")
+	}
+	help := symbols[metadata.HelpRef]
+	unit := symbols[metadata.UnitRef]
+
+	// symbolize help and unit
+	metadata.HelpRef = st.Symbolize(help)
+	metadata.UnitRef = st.Symbolize(unit)
+
+	maxMetadataValueLength := cfg.MaxMetadataLength
+	var reason string
+	var cause string
+	var metadataType string
+	if len(help) > maxMetadataValueLength {
+		metadataType = typeHelp
+		reason = helpTooLong
+		cause = help
+	} else if len(unit) > maxMetadataValueLength {
+		metadataType = typeUnit
+		reason = unitTooLong
+		cause = unit
+	}
+
+	if reason != "" {
+		validateMetrics.DiscardedMetadata.WithLabelValues(reason, userID).Inc()
+		return httpgrpc.Errorf(http.StatusBadRequest, errMetadataV2TooLong, metadataType, cause)
+	}
+
+	return nil
+}
+
 // ValidateMetadata returns an err if a metric metadata is invalid.
 func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID string, metadata *cortexpb.MetricMetadata) error {
 	if cfg.EnforceMetadataMetricName && metadata.GetMetricFamilyName() == "" {
@@ -276,6 +353,67 @@ func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID stri
 	return nil
 }
 
+// ValidateNativeHistogramV2 enforces the per-user native histogram bucket
+// limit on a v2 histogram sample. If the sample exceeds
+// limits.MaxNativeHistogramBuckets, its resolution is repeatedly reduced
+// (schema decremented) until it fits; if it cannot fit even at the minimum
+// exponential schema, the sample is counted as discarded and an error is
+// returned. A limit of 0 disables the check entirely.
+func ValidateNativeHistogramV2(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpbv2.Histogram) (cortexpbv2.Histogram, error) {
+	if limits.MaxNativeHistogramBuckets == 0 {
+		return histogramSample, nil
+	}
+
+	var (
+		exceedLimit bool
+	)
+	if histogramSample.IsFloatHistogram() {
+		// Initial check to see if the bucket limit is exceeded or not. If not, we can avoid type casting.
+		exceedLimit = len(histogramSample.PositiveCounts)+len(histogramSample.NegativeCounts) > limits.MaxNativeHistogramBuckets
+		if !exceedLimit {
+			return histogramSample, nil
+		}
+		// Exceed limit.
+		if histogramSample.Schema <= histogram.ExponentialSchemaMin {
+			validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+			return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+		}
+		fh := cortexpbv2.FloatHistogramProtoToFloatHistogram(histogramSample)
+		// Reduce resolution one schema step at a time until the bucket count
+		// fits, bailing out once the minimum schema is reached.
+		oBuckets := len(fh.PositiveBuckets) + len(fh.NegativeBuckets)
+		for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
+			if fh.Schema <= histogram.ExponentialSchemaMin {
+				validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+				return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+			}
+			fh = fh.ReduceResolution(fh.Schema - 1)
+		}
+		if oBuckets != len(fh.PositiveBuckets)+len(fh.NegativeBuckets) {
+			validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
+		}
+		// If resolution reduced, convert new float histogram to protobuf type again.
+		return cortexpbv2.FloatHistogramToHistogramProto(histogramSample.Timestamp, fh), nil
+	}
+
+	// Initial check to see if bucket limit is exceeded or not. If not, we can avoid type casting.
+	exceedLimit = len(histogramSample.PositiveDeltas)+len(histogramSample.NegativeDeltas) > limits.MaxNativeHistogramBuckets
+	if !exceedLimit {
+		return histogramSample, nil
+	}
+	// Exceed limit.
+	if histogramSample.Schema <= histogram.ExponentialSchemaMin {
+		validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+		return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+	}
+	h := cortexpbv2.HistogramProtoToHistogram(histogramSample)
+	// Same reduction loop as above, for the integer histogram flavour.
+	oBuckets := len(h.PositiveBuckets) + len(h.NegativeBuckets)
+	for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
+		if h.Schema <= histogram.ExponentialSchemaMin {
+			validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+			return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+		}
+		h = h.ReduceResolution(h.Schema - 1)
+	}
+	if oBuckets != len(h.PositiveBuckets)+len(h.NegativeBuckets) {
+		validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
+	}
+	// If resolution reduced, convert new histogram to protobuf type again.
+	return cortexpbv2.HistogramToHistogramProto(histogramSample.Timestamp, h), nil
+}
+
 func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpb.Histogram) (cortexpb.Histogram, error) {
 	if limits.MaxNativeHistogramBuckets == 0 {
 		return histogramSample, nil

From e739b3d0ba8acbb1589a3b5873fc158ecda70857 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Mon, 11 Nov 2024 10:32:44 +0900
Subject: [PATCH 5/6] Integrate cortexpbv2 to cortexpb

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 Makefile                                      |    1 -
 pkg/api/api.go                                |    3 +-
 pkg/cortexpb/codecv2.go                       |   34 +
 pkg/cortexpb/compat.go                        |    4 +-
 pkg/cortexpb/compat_test.go                   |    4 +-
 pkg/{cortexpbv2 => cortexpb}/compatv2.go      |   12 +-
 pkg/{cortexpbv2 => cortexpb}/compatv2_test.go |   12 +-
 pkg/cortexpb/cortex.pb.go                     | 2359 ++++++++-
 pkg/cortexpb/cortex.proto                     |  117 +-
 pkg/{cortexpbv2 => cortexpb}/timeseriesv2.go  |   63 +-
 .../timeseriesv2_test.go                      |   20 +-
 pkg/cortexpbv2/codecv2.go                     |  160 -
 pkg/cortexpbv2/cortexv2.pb.go                 | 4340 -----------------
 pkg/cortexpbv2/cortexv2.proto                 |  227 -
 pkg/distributor/distributor.go                |  112 +-
 pkg/distributor/distributor_prw2_test.go      |  259 +-
 pkg/distributor/distributor_test.go           |   15 +-
 .../distributorpb/distributor.pb.go           |   38 +-
 .../distributorpb/distributor.proto           |    3 +-
 pkg/ingester/client/client.go                 |   15 +-
 pkg/ingester/client/ingester.pb.go            |  189 +-
 pkg/ingester/client/ingester.proto            |    3 +-
 pkg/ingester/ingester.go                      |   33 +-
 pkg/ingester/ingester_prw2_test.go            |  576 ++-
 pkg/ruler/compat.go                           |    3 +-
 pkg/util/push/otlp_test.go                    |    2 +-
 pkg/util/push/push.go                         |    7 +-
 pkg/util/push/push_test.go                    |   39 +-
 pkg/util/validation/validate.go               |   66 +-
 29 files changed, 3014 insertions(+), 5702 deletions(-)
 create mode 100644 pkg/cortexpb/codecv2.go
 rename pkg/{cortexpbv2 => cortexpb}/compatv2.go (85%)
 rename pkg/{cortexpbv2 => cortexpb}/compatv2_test.go (81%)
 rename pkg/{cortexpbv2 => cortexpb}/timeseriesv2.go (66%)
 rename pkg/{cortexpbv2 => cortexpb}/timeseriesv2_test.go (82%)
 delete mode 100644 pkg/cortexpbv2/codecv2.go
 delete mode 100644 pkg/cortexpbv2/cortexv2.pb.go
 delete mode 100644 pkg/cortexpbv2/cortexv2.proto

diff --git a/Makefile b/Makefile
index 5936409ebb..2704ee35b6 100644
--- a/Makefile
+++ b/Makefile
@@ -85,7 +85,6 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe))))
 
 # Manually declared dependencies And what goes into each exe
 pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto
-pkg/cortexpbv2/cortexv2.pb.go: pkg/cortexpbv2/cortexv2.proto
 pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto
 pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto
 pkg/ingester/wal.pb.go: pkg/ingester/wal.proto
diff --git a/pkg/api/api.go b/pkg/api/api.go
index 5afd9504ef..7145d5b8e2 100644
--- a/pkg/api/api.go
+++ b/pkg/api/api.go
@@ -23,7 +23,6 @@ import (
 	"github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb"
 	"github.com/cortexproject/cortex/pkg/compactor"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/distributor"
 	"github.com/cortexproject/cortex/pkg/distributor/distributorpb"
 	frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1"
@@ -318,7 +317,7 @@ type Ingester interface {
 	AllUserStatsHandler(http.ResponseWriter, *http.Request)
 	ModeHandler(http.ResponseWriter, *http.Request)
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
-	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+	PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error)
 }
 
 // RegisterIngester registers the ingesters HTTP and GRPC service
diff --git a/pkg/cortexpb/codecv2.go b/pkg/cortexpb/codecv2.go
new file mode 100644
index 0000000000..a80b3b314c
--- /dev/null
+++ b/pkg/cortexpb/codecv2.go
@@ -0,0 +1,34 @@
+package cortexpb
+
+import (
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// ToLabels returns model labels.Labels decoded from the timeseries' symbolized label refs.
+func (t TimeSeriesV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+	return desymbolizeLabels(b, t.GetLabelsRefs(), symbols)
+}
+
+// ToLabels returns model labels.Labels decoded from the exemplar's symbolized label refs.
+func (e ExemplarV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+	return desymbolizeLabels(b, e.GetLabelsRefs(), symbols)
+}
+
+// ToV1Metadata converts a v2 MetadataV2 into a v1 MetricMetadata, resolving
+// the help/unit symbol refs against symbols and attaching the given metric
+// family name. NOTE(review): it indexes symbols without bounds checks, so
+// callers must have validated UnitRef/HelpRef first — confirm at call sites.
+func (m MetadataV2) ToV1Metadata(name string, symbols []string) *MetricMetadata {
+	return &MetricMetadata{
+		Type:             m.Type,
+		MetricFamilyName: name,
+		Unit:             symbols[m.UnitRef],
+		Help:             symbols[m.HelpRef],
+	}
+}
+
+// desymbolizeLabels decodes interleaved label references via the given
+// symbols table into sorted labels.Labels, reusing b as scratch space.
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+	b.Reset()
+	// Refs are laid out pairwise: even index = name ref, odd index = value ref.
+	for i := 0; i < len(labelRefs); i += 2 {
+		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+	}
+	b.Sort()
+	return b.Labels()
+}
diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go
index 6de2423d56..4d8cb0c1ee 100644
--- a/pkg/cortexpb/compat.go
+++ b/pkg/cortexpb/compat.go
@@ -20,7 +20,7 @@ import (
 
 // ToWriteRequest converts matched slices of Labels, Samples, Metadata and Histograms into a WriteRequest proto.
 // It gets timeseries from the pool, so ReuseSlice() should be called when done.
-func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source WriteRequest_SourceEnum) *WriteRequest {
+func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source SourceEnum) *WriteRequest {
 	req := &WriteRequest{
 		Timeseries: PreallocTimeseriesSliceFromPool(),
 		Metadata:   metadata,
@@ -170,7 +170,7 @@ func (s byLabel) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 
 // MetricMetadataMetricTypeToMetricType converts a metric type from our internal client
 // to a Prometheus one.
-func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) model.MetricType {
+func MetricMetadataMetricTypeToMetricType(mt MetricType) model.MetricType {
 	switch mt {
 	case UNKNOWN:
 		return model.MetricTypeUnknown
diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go
index 6fda91a84e..1f8d11ad1c 100644
--- a/pkg/cortexpb/compat_test.go
+++ b/pkg/cortexpb/compat_test.go
@@ -74,7 +74,7 @@ func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v interface{}
 func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) {
 	tc := []struct {
 		desc     string
-		input    MetricMetadata_MetricType
+		input    MetricType
 		expected model.MetricType
 	}{
 		{
@@ -89,7 +89,7 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) {
 		},
 		{
 			desc:     "with an unknown metric",
-			input:    MetricMetadata_MetricType(100),
+			input:    MetricType(100),
 			expected: model.MetricTypeUnknown,
 		},
 	}
diff --git a/pkg/cortexpbv2/compatv2.go b/pkg/cortexpb/compatv2.go
similarity index 85%
rename from pkg/cortexpbv2/compatv2.go
rename to pkg/cortexpb/compatv2.go
index 13d42d000f..ed6e441624 100644
--- a/pkg/cortexpbv2/compatv2.go
+++ b/pkg/cortexpb/compatv2.go
@@ -1,14 +1,12 @@
-package cortexpbv2
+package cortexpb
 
 import (
 	"github.com/prometheus/prometheus/model/labels"
 	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
 )
 
 // ToWriteRequestV2 converts matched slices of Labels, Samples, and Histograms into a WriteRequest proto.
-func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histogram, metadata []Metadata, source WriteRequest_SourceEnum, help ...string) *WriteRequest {
+func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histogram, metadata []MetadataV2, source SourceEnum, help ...string) *WriteRequestV2 {
 	st := writev2.NewSymbolTable()
 	labelRefs := make([][]uint32, 0, len(lbls))
 	for _, lbl := range lbls {
@@ -21,7 +19,7 @@ func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histo
 
 	symbols := st.Symbols()
 
-	req := &WriteRequest{
+	req := &WriteRequestV2{
 		Timeseries: PreallocTimeseriesV2SliceFromPool(),
 		Symbols:    symbols,
 		Source:     source,
@@ -41,13 +39,13 @@ func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histo
 			ts.Metadata = metadata[i]
 		}
 		i++
-		req.Timeseries = append(req.Timeseries, PreallocTimeseriesV2{TimeSeries: ts})
+		req.Timeseries = append(req.Timeseries, PreallocTimeseriesV2{TimeSeriesV2: ts})
 	}
 
 	return req
 }
 
-func GetLabelRefsFromLabelAdapters(symbols []string, las []cortexpb.LabelAdapter) []uint32 {
+func GetLabelRefsFromLabelAdapters(symbols []string, las []LabelAdapter) []uint32 {
 	var ret []uint32
 
 	symbolMap := map[string]uint32{}
diff --git a/pkg/cortexpbv2/compatv2_test.go b/pkg/cortexpb/compatv2_test.go
similarity index 81%
rename from pkg/cortexpbv2/compatv2_test.go
rename to pkg/cortexpb/compatv2_test.go
index a6d809e0a4..b25c5c2064 100644
--- a/pkg/cortexpbv2/compatv2_test.go
+++ b/pkg/cortexpb/compatv2_test.go
@@ -1,33 +1,31 @@
-package cortexpbv2
+package cortexpb
 
 import (
 	"testing"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/stretchr/testify/require"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
 )
 
 func Test_GetLabelRefsFromLabelAdapters(t *testing.T) {
 	tests := []struct {
 		symbols            []string
-		lbs                []cortexpb.LabelAdapter
+		lbs                []LabelAdapter
 		expectedSeriesRefs []uint32
 	}{
 		{
 			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
-			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "foo", Value: "bar"}},
+			lbs:                []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "foo", Value: "bar"}},
 			expectedSeriesRefs: []uint32{1, 2, 3, 4},
 		},
 		{
 			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"},
-			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}},
+			lbs:                []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}},
 			expectedSeriesRefs: []uint32{1, 2, 5, 6},
 		},
 		{
 			symbols:            []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux", "1"},
-			lbs:                []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}, {Name: "qux", Value: "1"}},
+			lbs:                []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}, {Name: "qux", Value: "1"}},
 			expectedSeriesRefs: []uint32{1, 2, 5, 6, 6, 7},
 		},
 	}
diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go
index 3b63e15904..104a97c807 100644
--- a/pkg/cortexpb/cortex.pb.go
+++ b/pkg/cortexpb/cortex.pb.go
@@ -28,41 +28,41 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
-type WriteRequest_SourceEnum int32
+type SourceEnum int32
 
 const (
-	API  WriteRequest_SourceEnum = 0
-	RULE WriteRequest_SourceEnum = 1
+	API  SourceEnum = 0
+	RULE SourceEnum = 1
 )
 
-var WriteRequest_SourceEnum_name = map[int32]string{
+var SourceEnum_name = map[int32]string{
 	0: "API",
 	1: "RULE",
 }
 
-var WriteRequest_SourceEnum_value = map[string]int32{
+var SourceEnum_value = map[string]int32{
 	"API":  0,
 	"RULE": 1,
 }
 
-func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{0, 0}
+func (SourceEnum) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{0}
 }
 
-type MetricMetadata_MetricType int32
+type MetricType int32
 
 const (
-	UNKNOWN        MetricMetadata_MetricType = 0
-	COUNTER        MetricMetadata_MetricType = 1
-	GAUGE          MetricMetadata_MetricType = 2
-	HISTOGRAM      MetricMetadata_MetricType = 3
-	GAUGEHISTOGRAM MetricMetadata_MetricType = 4
-	SUMMARY        MetricMetadata_MetricType = 5
-	INFO           MetricMetadata_MetricType = 6
-	STATESET       MetricMetadata_MetricType = 7
+	UNKNOWN        MetricType = 0
+	COUNTER        MetricType = 1
+	GAUGE          MetricType = 2
+	HISTOGRAM      MetricType = 3
+	GAUGEHISTOGRAM MetricType = 4
+	SUMMARY        MetricType = 5
+	INFO           MetricType = 6
+	STATESET       MetricType = 7
 )
 
-var MetricMetadata_MetricType_name = map[int32]string{
+var MetricType_name = map[int32]string{
 	0: "UNKNOWN",
 	1: "COUNTER",
 	2: "GAUGE",
@@ -73,7 +73,7 @@ var MetricMetadata_MetricType_name = map[int32]string{
 	7: "STATESET",
 }
 
-var MetricMetadata_MetricType_value = map[string]int32{
+var MetricType_value = map[string]int32{
 	"UNKNOWN":        0,
 	"COUNTER":        1,
 	"GAUGE":          2,
@@ -84,8 +84,8 @@ var MetricMetadata_MetricType_value = map[string]int32{
 	"STATESET":       7,
 }
 
-func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{5, 0}
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{1}
 }
 
 type Histogram_ResetHint int32
@@ -112,20 +112,387 @@ var Histogram_ResetHint_value = map[string]int32{
 }
 
 func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{8, 0}
+	return fileDescriptor_893a47d0a749d749, []int{13, 0}
+}
+
+// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
+type WriteRequestV2 struct {
+	Source                  SourceEnum             `protobuf:"varint,3,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"`
+	Symbols                 []string               `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
+	Timeseries              []PreallocTimeseriesV2 `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"`
+	SkipLabelNameValidation bool                   `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+}
+
+func (m *WriteRequestV2) Reset()      { *m = WriteRequestV2{} }
+func (*WriteRequestV2) ProtoMessage() {}
+func (*WriteRequestV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{0}
+}
+func (m *WriteRequestV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteRequestV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteRequestV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteRequestV2.Merge(m, src)
+}
+func (m *WriteRequestV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteRequestV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteRequestV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteRequestV2 proto.InternalMessageInfo
+
+func (m *WriteRequestV2) GetSource() SourceEnum {
+	if m != nil {
+		return m.Source
+	}
+	return API
+}
+
+func (m *WriteRequestV2) GetSymbols() []string {
+	if m != nil {
+		return m.Symbols
+	}
+	return nil
+}
+
+func (m *WriteRequestV2) GetSkipLabelNameValidation() bool {
+	if m != nil {
+		return m.SkipLabelNameValidation
+	}
+	return false
+}
+
+type WriteResponseV2 struct {
+	// Samples represents X-Prometheus-Remote-Write-Written-Samples
+	Samples int64 `protobuf:"varint,1,opt,name=Samples,proto3" json:"Samples,omitempty"`
+	// Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+	Histograms int64 `protobuf:"varint,2,opt,name=Histograms,proto3" json:"Histograms,omitempty"`
+	// Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+	Exemplars int64 `protobuf:"varint,3,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"`
+}
+
+func (m *WriteResponseV2) Reset()      { *m = WriteResponseV2{} }
+func (*WriteResponseV2) ProtoMessage() {}
+func (*WriteResponseV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{1}
+}
+func (m *WriteResponseV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteResponseV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteResponseV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteResponseV2.Merge(m, src)
+}
+func (m *WriteResponseV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteResponseV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteResponseV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteResponseV2 proto.InternalMessageInfo
+
+func (m *WriteResponseV2) GetSamples() int64 {
+	if m != nil {
+		return m.Samples
+	}
+	return 0
+}
+
+func (m *WriteResponseV2) GetHistograms() int64 {
+	if m != nil {
+		return m.Histograms
+	}
+	return 0
+}
+
+func (m *WriteResponseV2) GetExemplars() int64 {
+	if m != nil {
+		return m.Exemplars
+	}
+	return 0
+}
+
+type TimeSeriesV2 struct {
+	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+	// Timeseries messages can either specify samples or (native) histogram samples
+	// (histogram field), but not both. For a typical sender (real-time metric
+	// streaming), in healthy cases, there will be only one sample or histogram.
+	//
+	// Samples and histograms are sorted by timestamp (older first).
+	Samples    []Sample    `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+	Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
+	// exemplars represents an optional set of exemplars attached to this series' samples.
+	Exemplars []ExemplarV2 `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
+	// metadata represents the metadata associated with the given series' samples.
+	Metadata MetadataV2 `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
+	// created_timestamp represents an optional created timestamp associated with
+	// this series' samples in ms format, typically for counter or histogram type
+	// metrics. Created timestamp represents the time when the counter started
+	// counting (sometimes referred to as start timestamp), which can increase
+	// the accuracy of query results.
+	//
+	// Note that some receivers might require this and in return fail to
+	// ingest such samples within the Request.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	//
+	// Note that the "optional" keyword is omitted due to
+	// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+	// Zero value means value not set. If you need to use exactly zero value for
+	// the timestamp, use 1 millisecond before or after.
+	CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
+}
+
+func (m *TimeSeriesV2) Reset()      { *m = TimeSeriesV2{} }
+func (*TimeSeriesV2) ProtoMessage() {}
+func (*TimeSeriesV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{2}
+}
+func (m *TimeSeriesV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TimeSeriesV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TimeSeriesV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TimeSeriesV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TimeSeriesV2.Merge(m, src)
+}
+func (m *TimeSeriesV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *TimeSeriesV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_TimeSeriesV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeriesV2 proto.InternalMessageInfo
+
+func (m *TimeSeriesV2) GetLabelsRefs() []uint32 {
+	if m != nil {
+		return m.LabelsRefs
+	}
+	return nil
+}
+
+func (m *TimeSeriesV2) GetSamples() []Sample {
+	if m != nil {
+		return m.Samples
+	}
+	return nil
+}
+
+func (m *TimeSeriesV2) GetHistograms() []Histogram {
+	if m != nil {
+		return m.Histograms
+	}
+	return nil
+}
+
+func (m *TimeSeriesV2) GetExemplars() []ExemplarV2 {
+	if m != nil {
+		return m.Exemplars
+	}
+	return nil
+}
+
+func (m *TimeSeriesV2) GetMetadata() MetadataV2 {
+	if m != nil {
+		return m.Metadata
+	}
+	return MetadataV2{}
+}
+
+func (m *TimeSeriesV2) GetCreatedTimestamp() int64 {
+	if m != nil {
+		return m.CreatedTimestamp
+	}
+	return 0
+}
+
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+type ExemplarV2 struct {
+	// labels_refs is an optional list of label name-value pair references, encoded
+	// as indices to the Request.symbols array. This list's len is always
+	// a multiple of 2, and the underlying labels should be sorted lexicographically.
+	// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+	// value represents an exact example value. This can be useful when the exemplar
+	// is attached to a histogram, which only gives an estimated value through buckets.
+	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+	// timestamp represents the timestamp of the exemplar in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
+	Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *ExemplarV2) Reset()      { *m = ExemplarV2{} }
+func (*ExemplarV2) ProtoMessage() {}
+func (*ExemplarV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{3}
+}
+func (m *ExemplarV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExemplarV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ExemplarV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ExemplarV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExemplarV2.Merge(m, src)
+}
+func (m *ExemplarV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExemplarV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExemplarV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExemplarV2 proto.InternalMessageInfo
+
+func (m *ExemplarV2) GetLabelsRefs() []uint32 {
+	if m != nil {
+		return m.LabelsRefs
+	}
+	return nil
+}
+
+func (m *ExemplarV2) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (m *ExemplarV2) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+type MetadataV2 struct {
+	Type MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricType" json:"type,omitempty"`
+	// help_ref is a reference to the Request.symbols array representing help
+	// text for the metric. Help is optional, reference should point to an empty string in
+	// such a case.
+	HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
+	// unit_ref is a reference to the Request.symbols array representing a unit
+	// for the metric. Unit is optional, reference should point to an empty string in
+	// such a case.
+	UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
+}
+
+func (m *MetadataV2) Reset()      { *m = MetadataV2{} }
+func (*MetadataV2) ProtoMessage() {}
+func (*MetadataV2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_893a47d0a749d749, []int{4}
+}
+func (m *MetadataV2) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MetadataV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MetadataV2.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MetadataV2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetadataV2.Merge(m, src)
+}
+func (m *MetadataV2) XXX_Size() int {
+	return m.Size()
+}
+func (m *MetadataV2) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetadataV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetadataV2 proto.InternalMessageInfo
+
+func (m *MetadataV2) GetType() MetricType {
+	if m != nil {
+		return m.Type
+	}
+	return UNKNOWN
+}
+
+func (m *MetadataV2) GetHelpRef() uint32 {
+	if m != nil {
+		return m.HelpRef
+	}
+	return 0
+}
+
+func (m *MetadataV2) GetUnitRef() uint32 {
+	if m != nil {
+		return m.UnitRef
+	}
+	return 0
 }
 
 type WriteRequest struct {
-	Timeseries              []PreallocTimeseries    `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
-	Source                  WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.WriteRequest_SourceEnum" json:"Source,omitempty"`
-	Metadata                []*MetricMetadata       `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"`
-	SkipLabelNameValidation bool                    `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+	Timeseries              []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
+	Source                  SourceEnum           `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"`
+	Metadata                []*MetricMetadata    `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"`
+	SkipLabelNameValidation bool                 `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
 }
 
 func (m *WriteRequest) Reset()      { *m = WriteRequest{} }
 func (*WriteRequest) ProtoMessage() {}
 func (*WriteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{0}
+	return fileDescriptor_893a47d0a749d749, []int{5}
 }
 func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -154,7 +521,7 @@ func (m *WriteRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_WriteRequest proto.InternalMessageInfo
 
-func (m *WriteRequest) GetSource() WriteRequest_SourceEnum {
+func (m *WriteRequest) GetSource() SourceEnum {
 	if m != nil {
 		return m.Source
 	}
@@ -181,7 +548,7 @@ type WriteResponse struct {
 func (m *WriteResponse) Reset()      { *m = WriteResponse{} }
 func (*WriteResponse) ProtoMessage() {}
 func (*WriteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{1}
+	return fileDescriptor_893a47d0a749d749, []int{6}
 }
 func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -221,7 +588,7 @@ type TimeSeries struct {
 func (m *TimeSeries) Reset()      { *m = TimeSeries{} }
 func (*TimeSeries) ProtoMessage() {}
 func (*TimeSeries) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{2}
+	return fileDescriptor_893a47d0a749d749, []int{7}
 }
 func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -279,7 +646,7 @@ type LabelPair struct {
 func (m *LabelPair) Reset()      { *m = LabelPair{} }
 func (*LabelPair) ProtoMessage() {}
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{3}
+	return fileDescriptor_893a47d0a749d749, []int{8}
 }
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -330,7 +697,7 @@ type Sample struct {
 func (m *Sample) Reset()      { *m = Sample{} }
 func (*Sample) ProtoMessage() {}
 func (*Sample) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{4}
+	return fileDescriptor_893a47d0a749d749, []int{9}
 }
 func (m *Sample) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -374,16 +741,16 @@ func (m *Sample) GetTimestampMs() int64 {
 }
 
 type MetricMetadata struct {
-	Type             MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricMetadata_MetricType" json:"type,omitempty"`
-	MetricFamilyName string                    `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
-	Help             string                    `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
-	Unit             string                    `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+	Type             MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricType" json:"type,omitempty"`
+	MetricFamilyName string     `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
+	Help             string     `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
+	Unit             string     `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
 }
 
 func (m *MetricMetadata) Reset()      { *m = MetricMetadata{} }
 func (*MetricMetadata) ProtoMessage() {}
 func (*MetricMetadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{5}
+	return fileDescriptor_893a47d0a749d749, []int{10}
 }
 func (m *MetricMetadata) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -412,7 +779,7 @@ func (m *MetricMetadata) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo
 
-func (m *MetricMetadata) GetType() MetricMetadata_MetricType {
+func (m *MetricMetadata) GetType() MetricType {
 	if m != nil {
 		return m.Type
 	}
@@ -447,7 +814,7 @@ type Metric struct {
 func (m *Metric) Reset()      { *m = Metric{} }
 func (*Metric) ProtoMessage() {}
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{6}
+	return fileDescriptor_893a47d0a749d749, []int{11}
 }
 func (m *Metric) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -486,7 +853,7 @@ type Exemplar struct {
 func (m *Exemplar) Reset()      { *m = Exemplar{} }
 func (*Exemplar) ProtoMessage() {}
 func (*Exemplar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{7}
+	return fileDescriptor_893a47d0a749d749, []int{12}
 }
 func (m *Exemplar) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -579,7 +946,7 @@ type Histogram struct {
 func (m *Histogram) Reset()      { *m = Histogram{} }
 func (*Histogram) ProtoMessage() {}
 func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{8}
+	return fileDescriptor_893a47d0a749d749, []int{13}
 }
 func (m *Histogram) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -780,7 +1147,7 @@ type BucketSpan struct {
 func (m *BucketSpan) Reset()      { *m = BucketSpan{} }
 func (*BucketSpan) ProtoMessage() {}
 func (*BucketSpan) Descriptor() ([]byte, []int) {
-	return fileDescriptor_893a47d0a749d749, []int{9}
+	return fileDescriptor_893a47d0a749d749, []int{14}
 }
 func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -824,9 +1191,14 @@ func (m *BucketSpan) GetLength() uint32 {
 }
 
 func init() {
-	proto.RegisterEnum("cortexpb.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
-	proto.RegisterEnum("cortexpb.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value)
+	proto.RegisterEnum("cortexpb.SourceEnum", SourceEnum_name, SourceEnum_value)
+	proto.RegisterEnum("cortexpb.MetricType", MetricType_name, MetricType_value)
 	proto.RegisterEnum("cortexpb.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
+	proto.RegisterType((*WriteRequestV2)(nil), "cortexpb.WriteRequestV2")
+	proto.RegisterType((*WriteResponseV2)(nil), "cortexpb.WriteResponseV2")
+	proto.RegisterType((*TimeSeriesV2)(nil), "cortexpb.TimeSeriesV2")
+	proto.RegisterType((*ExemplarV2)(nil), "cortexpb.ExemplarV2")
+	proto.RegisterType((*MetadataV2)(nil), "cortexpb.MetadataV2")
 	proto.RegisterType((*WriteRequest)(nil), "cortexpb.WriteRequest")
 	proto.RegisterType((*WriteResponse)(nil), "cortexpb.WriteResponse")
 	proto.RegisterType((*TimeSeries)(nil), "cortexpb.TimeSeries")
@@ -842,83 +1214,97 @@ func init() {
 func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) }
 
 var fileDescriptor_893a47d0a749d749 = []byte{
-	// 1031 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x23, 0x45,
-	0x17, 0xed, 0x72, 0xfb, 0x79, 0x63, 0x3b, 0x3d, 0xf5, 0x45, 0x1f, 0xad, 0x48, 0xd3, 0x71, 0x1a,
-	0x01, 0x16, 0x42, 0x01, 0x05, 0x01, 0x9a, 0x51, 0x84, 0x64, 0x0f, 0xce, 0x43, 0x33, 0x76, 0xa2,
-	0xb2, 0xc3, 0x68, 0xd8, 0x58, 0x15, 0xa7, 0x12, 0xb7, 0xa6, 0x5f, 0x74, 0x95, 0xa3, 0x09, 0x2b,
-	0x56, 0x88, 0x25, 0x6b, 0xb6, 0x6c, 0xf8, 0x05, 0xfc, 0x86, 0x2c, 0xb3, 0x1c, 0xb1, 0x88, 0x88,
-	0xb3, 0x99, 0xe5, 0x2c, 0xf8, 0x01, 0xa8, 0xaa, 0x5f, 0xce, 0x84, 0x11, 0x9b, 0xd9, 0x55, 0x9d,
-	0x7b, 0xcf, 0xbd, 0xa7, 0xea, 0x9e, 0x2e, 0x35, 0xd4, 0x27, 0x41, 0x24, 0xd8, 0x8b, 0x8d, 0x30,
-	0x0a, 0x44, 0x80, 0xab, 0xf1, 0x2e, 0x3c, 0x5a, 0x5d, 0x39, 0x0d, 0x4e, 0x03, 0x05, 0x7e, 0x2a,
-	0x57, 0x71, 0xdc, 0xfe, 0xa3, 0x00, 0xf5, 0xa7, 0x91, 0x23, 0x18, 0x61, 0xdf, 0xcf, 0x18, 0x17,
-	0xf8, 0x00, 0x40, 0x38, 0x1e, 0xe3, 0x2c, 0x72, 0x18, 0x37, 0x51, 0x4b, 0x6f, 0x2f, 0x6d, 0xae,
-	0x6c, 0xa4, 0x55, 0x36, 0x46, 0x8e, 0xc7, 0x86, 0x2a, 0xd6, 0x5d, 0xbd, 0xb8, 0x5a, 0xd3, 0xfe,
-	0xbc, 0x5a, 0xc3, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, 0x19, 0x8f, 0x2c, 0xd4, 0xc0, 0x0f,
-	0xa0, 0x3c, 0x0c, 0x66, 0xd1, 0x84, 0x99, 0x85, 0x16, 0x6a, 0x37, 0x37, 0xd7, 0xf3, 0x6a, 0x8b,
-	0x9d, 0x37, 0xe2, 0xa4, 0x9e, 0x3f, 0xf3, 0x48, 0x42, 0xc0, 0x0f, 0xa1, 0xea, 0x31, 0x41, 0x8f,
-	0xa9, 0xa0, 0xa6, 0xae, 0xa4, 0x98, 0x39, 0xb9, 0xcf, 0x44, 0xe4, 0x4c, 0xfa, 0x49, 0xbc, 0x5b,
-	0xbc, 0xb8, 0x5a, 0x43, 0x24, 0xcb, 0xc7, 0x5b, 0xb0, 0xca, 0x9f, 0x3b, 0xe1, 0xd8, 0xa5, 0x47,
-	0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x33, 0xea, 0x3a, 0xc7, 0x54, 0x38, 0x81, 0x6f, 0xbe, 0xaa,
-	0xb4, 0x50, 0xbb, 0x4a, 0xde, 0x93, 0x29, 0x4f, 0x64, 0xc6, 0x80, 0x7a, 0xec, 0xdb, 0x2c, 0x6e,
-	0xaf, 0x01, 0xe4, 0x7a, 0x70, 0x05, 0xf4, 0xce, 0xc1, 0x9e, 0xa1, 0xe1, 0x2a, 0x14, 0xc9, 0xe1,
-	0x93, 0x9e, 0x81, 0xec, 0x65, 0x68, 0x24, 0xea, 0x79, 0x18, 0xf8, 0x9c, 0xd9, 0x7f, 0x23, 0x80,
-	0xfc, 0x76, 0x70, 0x07, 0xca, 0xaa, 0x73, 0x7a, 0x87, 0xff, 0xcb, 0x85, 0xab, 0x7e, 0x07, 0xd4,
-	0x89, 0xba, 0x2b, 0xc9, 0x15, 0xd6, 0x15, 0xd4, 0x39, 0xa6, 0xa1, 0x60, 0x11, 0x49, 0x88, 0xf8,
-	0x33, 0xa8, 0x70, 0xea, 0x85, 0x2e, 0xe3, 0x66, 0x41, 0xd5, 0x30, 0xf2, 0x1a, 0x43, 0x15, 0x50,
-	0x87, 0xd6, 0x48, 0x9a, 0x86, 0xbf, 0x84, 0x1a, 0x7b, 0xc1, 0xbc, 0xd0, 0xa5, 0x11, 0x4f, 0x2e,
-	0x0c, 0xe7, 0x9c, 0x5e, 0x12, 0x4a, 0x58, 0x79, 0x2a, 0x7e, 0x00, 0x30, 0x75, 0xb8, 0x08, 0x4e,
-	0x23, 0xea, 0x71, 0xb3, 0xf8, 0xa6, 0xe0, 0xdd, 0x34, 0x96, 0x30, 0x17, 0x92, 0xed, 0x2f, 0xa0,
-	0x96, 0x9d, 0x07, 0x63, 0x28, 0xca, 0x8b, 0x36, 0x51, 0x0b, 0xb5, 0xeb, 0x44, 0xad, 0xf1, 0x0a,
-	0x94, 0xce, 0xa8, 0x3b, 0x8b, 0xa7, 0x5f, 0x27, 0xf1, 0xc6, 0xee, 0x40, 0x39, 0x3e, 0x42, 0x1e,
-	0x97, 0x24, 0x94, 0xc4, 0xf1, 0x3a, 0xd4, 0x95, 0x85, 0x04, 0xf5, 0xc2, 0xb1, 0xc7, 0x15, 0x59,
-	0x27, 0x4b, 0x19, 0xd6, 0xe7, 0xf6, 0xaf, 0x05, 0x68, 0xde, 0xf6, 0x00, 0xfe, 0x0a, 0x8a, 0xe2,
-	0x3c, 0x8c, 0x4b, 0x35, 0x37, 0xdf, 0x7f, 0x9b, 0x57, 0x92, 0xed, 0xe8, 0x3c, 0x64, 0x44, 0x11,
-	0xf0, 0x27, 0x80, 0x3d, 0x85, 0x8d, 0x4f, 0xa8, 0xe7, 0xb8, 0xe7, 0xca, 0x2f, 0xaa, 0x69, 0x8d,
-	0x18, 0x71, 0x64, 0x5b, 0x05, 0xa4, 0x4d, 0xe4, 0x31, 0xa7, 0xcc, 0x0d, 0xcd, 0xa2, 0x8a, 0xab,
-	0xb5, 0xc4, 0x66, 0xbe, 0x23, 0xcc, 0x52, 0x8c, 0xc9, 0xb5, 0x7d, 0x0e, 0x90, 0x77, 0xc2, 0x4b,
-	0x50, 0x39, 0x1c, 0x3c, 0x1e, 0xec, 0x3f, 0x1d, 0x18, 0x9a, 0xdc, 0x3c, 0xda, 0x3f, 0x1c, 0x8c,
-	0x7a, 0xc4, 0x40, 0xb8, 0x06, 0xa5, 0x9d, 0xce, 0xe1, 0x4e, 0xcf, 0x28, 0xe0, 0x06, 0xd4, 0x76,
-	0xf7, 0x86, 0xa3, 0xfd, 0x1d, 0xd2, 0xe9, 0x1b, 0x3a, 0xc6, 0xd0, 0x54, 0x91, 0x1c, 0x2b, 0x4a,
-	0xea, 0xf0, 0xb0, 0xdf, 0xef, 0x90, 0x67, 0x46, 0x49, 0x1a, 0x72, 0x6f, 0xb0, 0xbd, 0x6f, 0x94,
-	0x71, 0x1d, 0xaa, 0xc3, 0x51, 0x67, 0xd4, 0x1b, 0xf6, 0x46, 0x46, 0xc5, 0x7e, 0x0c, 0xe5, 0xb8,
-	0xf5, 0x3b, 0x30, 0xa2, 0xfd, 0x13, 0x82, 0x6a, 0x6a, 0x9e, 0x77, 0x61, 0xec, 0x5b, 0x96, 0x78,
-	0xeb, 0xc8, 0xf5, 0xbb, 0x23, 0xbf, 0x2c, 0x41, 0x2d, 0x33, 0x23, 0xbe, 0x0f, 0xb5, 0x49, 0x30,
-	0xf3, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x71, 0x57, 0x23, 0x55, 0x05, 0xed, 0xf9, 0x02, 0xaf,
-	0xc3, 0x52, 0x1c, 0x3e, 0x71, 0x03, 0x2a, 0xe2, 0x5e, 0xbb, 0x1a, 0x01, 0x05, 0x6e, 0x4b, 0x0c,
-	0x1b, 0xa0, 0xf3, 0x99, 0xa7, 0x3a, 0x21, 0x22, 0x97, 0xf8, 0xff, 0x50, 0xe6, 0x93, 0x29, 0xf3,
-	0xa8, 0x1a, 0xee, 0x3d, 0x92, 0xec, 0xf0, 0x07, 0xd0, 0xfc, 0x81, 0x45, 0xc1, 0x58, 0x4c, 0x23,
-	0xc6, 0xa7, 0x81, 0x7b, 0xac, 0x06, 0x8d, 0x48, 0x43, 0xa2, 0xa3, 0x14, 0xc4, 0x1f, 0x26, 0x69,
-	0xb9, 0xae, 0xb2, 0xd2, 0x85, 0x48, 0x5d, 0xe2, 0x8f, 0x52, 0x6d, 0x1f, 0x83, 0xb1, 0x90, 0x17,
-	0x0b, 0xac, 0x28, 0x81, 0x88, 0x34, 0xb3, 0xcc, 0x58, 0x64, 0x07, 0x9a, 0x3e, 0x3b, 0xa5, 0xc2,
-	0x39, 0x63, 0x63, 0x1e, 0x52, 0x9f, 0x9b, 0xd5, 0x37, 0x5f, 0xe5, 0xee, 0x6c, 0xf2, 0x9c, 0x89,
-	0x61, 0x48, 0xfd, 0xe4, 0x0b, 0x6d, 0xa4, 0x0c, 0x89, 0x71, 0xfc, 0x11, 0x2c, 0x67, 0x25, 0x8e,
-	0x99, 0x2b, 0x28, 0x37, 0x6b, 0x2d, 0xbd, 0x8d, 0x49, 0x56, 0xf9, 0x1b, 0x85, 0xde, 0x4a, 0x54,
-	0xda, 0xb8, 0x09, 0x2d, 0xbd, 0x8d, 0xf2, 0x44, 0x25, 0x4c, 0x3e, 0x6f, 0xcd, 0x30, 0xe0, 0xce,
-	0x82, 0xa8, 0xa5, 0xff, 0x16, 0x95, 0x32, 0x32, 0x51, 0x59, 0x89, 0x44, 0x54, 0x3d, 0x16, 0x95,
-	0xc2, 0xb9, 0xa8, 0x2c, 0x31, 0x11, 0xd5, 0x88, 0x45, 0xa5, 0x70, 0x22, 0x6a, 0x0b, 0x20, 0x62,
-	0x9c, 0x89, 0xf1, 0x54, 0xde, 0x7c, 0x53, 0x3d, 0x02, 0xf7, 0xff, 0xe5, 0x19, 0xdb, 0x20, 0x32,
-	0x6b, 0xd7, 0xf1, 0x05, 0xa9, 0x45, 0xe9, 0xf2, 0x8e, 0xff, 0x96, 0xef, 0xfa, 0xef, 0x21, 0xd4,
-	0x32, 0xea, 0xed, 0xef, 0xb9, 0x02, 0xfa, 0xb3, 0xde, 0xd0, 0x40, 0xb8, 0x0c, 0x85, 0xc1, 0xbe,
-	0x51, 0xc8, 0xbf, 0x69, 0x7d, 0xb5, 0xf8, 0xf3, 0x6f, 0x16, 0xea, 0x56, 0xa0, 0xa4, 0xc4, 0x77,
-	0xeb, 0x00, 0xf9, 0xec, 0xed, 0x2d, 0x80, 0xfc, 0xa2, 0xa4, 0xfd, 0x82, 0x93, 0x13, 0xce, 0x62,
-	0x3f, 0xdf, 0x23, 0xc9, 0x4e, 0xe2, 0x2e, 0xf3, 0x4f, 0xc5, 0x54, 0xd9, 0xb8, 0x41, 0x92, 0x5d,
-	0xf7, 0xeb, 0xcb, 0x6b, 0x4b, 0x7b, 0x79, 0x6d, 0x69, 0xaf, 0xaf, 0x2d, 0xf4, 0xe3, 0xdc, 0x42,
-	0xbf, 0xcf, 0x2d, 0x74, 0x31, 0xb7, 0xd0, 0xe5, 0xdc, 0x42, 0x7f, 0xcd, 0x2d, 0xf4, 0x6a, 0x6e,
-	0x69, 0xaf, 0xe7, 0x16, 0xfa, 0xe5, 0xc6, 0xd2, 0x2e, 0x6f, 0x2c, 0xed, 0xe5, 0x8d, 0xa5, 0x7d,
-	0x97, 0xfd, 0x14, 0x1c, 0x95, 0xd5, 0x5f, 0xc0, 0xe7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b,
-	0xb6, 0xdb, 0xd4, 0x35, 0x08, 0x00, 0x00,
-}
-
-func (x WriteRequest_SourceEnum) String() string {
-	s, ok := WriteRequest_SourceEnum_name[int32(x)]
+	// 1256 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
+	0x18, 0xf6, 0x78, 0xfd, 0xb5, 0xaf, 0x3f, 0xb2, 0x1d, 0xa2, 0x62, 0xa2, 0x76, 0xe3, 0xae, 0x04,
+	0x58, 0xa5, 0x0a, 0xc8, 0x88, 0x0a, 0xaa, 0x0a, 0xc9, 0x2e, 0x6e, 0x13, 0xda, 0x38, 0xd1, 0xd8,
+	0x49, 0x55, 0x2e, 0xd6, 0xc6, 0x19, 0xc7, 0xab, 0xee, 0x17, 0x3b, 0xe3, 0xaa, 0xe1, 0xc4, 0x09,
+	0xf5, 0xd8, 0xff, 0xc0, 0x85, 0xff, 0xc0, 0x1f, 0xe8, 0x31, 0xc7, 0x8a, 0x43, 0x45, 0x9d, 0x4b,
+	0x8f, 0x15, 0xe2, 0x07, 0xa0, 0x99, 0xfd, 0x74, 0x52, 0xa9, 0x14, 0x7a, 0x9b, 0x79, 0xde, 0xf7,
+	0x9d, 0x79, 0xde, 0x8f, 0x67, 0x76, 0xa1, 0x36, 0xf1, 0x02, 0x4e, 0x1f, 0x6f, 0xf8, 0x81, 0xc7,
+	0x3d, 0x5c, 0x09, 0x77, 0xfe, 0xc1, 0xda, 0xea, 0x91, 0x77, 0xe4, 0x49, 0xf0, 0x73, 0xb1, 0x0a,
+	0xed, 0xc6, 0x5f, 0x08, 0x1a, 0xf7, 0x03, 0x8b, 0x53, 0x42, 0x7f, 0x9c, 0x53, 0xc6, 0xf7, 0x3b,
+	0xf8, 0x1a, 0x94, 0x86, 0xde, 0x3c, 0x98, 0xd0, 0xa6, 0xd2, 0x42, 0xed, 0x46, 0x67, 0x75, 0x23,
+	0x3e, 0x63, 0x23, 0xc4, 0xfb, 0xee, 0xdc, 0x21, 0x91, 0x0f, 0x6e, 0x42, 0x99, 0x1d, 0x3b, 0x07,
+	0x9e, 0xcd, 0x9a, 0x85, 0x96, 0xd2, 0x56, 0x49, 0xbc, 0xc5, 0x23, 0x00, 0x6e, 0x39, 0x94, 0xd1,
+	0xc0, 0xa2, 0xac, 0x59, 0x6c, 0x29, 0xed, 0x6a, 0xe7, 0x62, 0x7a, 0xd6, 0xc8, 0x72, 0xe8, 0x50,
+	0xda, 0xf6, 0x3b, 0xbd, 0x4b, 0xcf, 0x5e, 0xac, 0xe7, 0xfe, 0x78, 0xb1, 0xbe, 0xba, 0x1b, 0x50,
+	0xd3, 0xb6, 0xbd, 0xc9, 0x28, 0x89, 0xdc, 0xef, 0x90, 0xcc, 0x39, 0xf8, 0x26, 0xac, 0xb1, 0x87,
+	0x96, 0x3f, 0xb6, 0xcd, 0x03, 0x6a, 0x8f, 0x5d, 0xd3, 0xa1, 0xe3, 0x47, 0xa6, 0x6d, 0x1d, 0x9a,
+	0xdc, 0xf2, 0xdc, 0xe6, 0xab, 0x72, 0x0b, 0xb5, 0x2b, 0xe4, 0x43, 0xe1, 0x72, 0x4f, 0x78, 0x0c,
+	0x4c, 0x87, 0xee, 0x27, 0xf6, 0xef, 0x0b, 0x15, 0xa4, 0x29, 0x86, 0x05, 0x2b, 0x51, 0xce, 0xcc,
+	0xf7, 0x5c, 0x46, 0xf7, 0x3b, 0x22, 0x8d, 0xa1, 0xe9, 0xf8, 0x36, 0x65, 0x4d, 0xd4, 0x42, 0x6d,
+	0x85, 0xc4, 0x5b, 0xac, 0x03, 0x6c, 0x5a, 0x8c, 0x7b, 0x47, 0x81, 0xe9, 0xb0, 0x66, 0x5e, 0x1a,
+	0x33, 0x08, 0xbe, 0x04, 0x6a, 0xff, 0x31, 0x75, 0x7c, 0xdb, 0x0c, 0x98, 0xac, 0x98, 0x42, 0x52,
+	0xc0, 0xf8, 0x3d, 0x0f, 0xb5, 0x6c, 0xa6, 0x78, 0x1d, 0xaa, 0x92, 0x3a, 0x1b, 0x07, 0x74, 0x2a,
+	0x2e, 0x53, 0xda, 0x75, 0x02, 0x21, 0x44, 0xe8, 0x94, 0xe1, 0x2f, 0xa0, 0xcc, 0x22, 0x26, 0x79,
+	0x59, 0x33, 0x2d, 0x53, 0x7f, 0x69, 0xe8, 0x15, 0x44, 0xb5, 0x48, 0xec, 0x86, 0xbf, 0x01, 0x98,
+	0xa5, 0x0c, 0x15, 0x19, 0xf4, 0x41, 0x1a, 0x94, 0x70, 0x8d, 0xe2, 0x32, 0xce, 0xf8, 0x6b, 0x50,
+	0x69, 0x42, 0xbe, 0x20, 0x23, 0x33, 0xed, 0x8e, 0xd3, 0xd8, 0xef, 0x44, 0xa1, 0xa9, 0x33, 0xbe,
+	0x0e, 0x15, 0x87, 0x72, 0xf3, 0xd0, 0xe4, 0x66, 0xb3, 0xd8, 0x42, 0xcb, 0x81, 0xdb, 0x91, 0x25,
+	0x09, 0x4c, 0x7c, 0xf1, 0x67, 0x70, 0x61, 0x12, 0x50, 0x93, 0xd3, 0xc3, 0xb1, 0xec, 0x2a, 0x37,
+	0x1d, 0xbf, 0x59, 0x92, 0x65, 0xd3, 0x22, 0xc3, 0x28, 0xc6, 0x0d, 0x13, 0x20, 0xe5, 0xf0, 0xf6,
+	0xd2, 0xad, 0x42, 0xf1, 0x91, 0x69, 0xcf, 0xa9, 0xec, 0x12, 0x22, 0xe1, 0x46, 0x34, 0x28, 0xbd,
+	0x29, 0x6a, 0x50, 0x02, 0x18, 0x36, 0x40, 0xca, 0x16, 0xb7, 0xa1, 0xc0, 0x8f, 0x7d, 0x2a, 0x67,
+	0xa0, 0x71, 0x26, 0xa3, 0xc0, 0x9a, 0x8c, 0x8e, 0x7d, 0x4a, 0xa4, 0x07, 0xfe, 0x08, 0x2a, 0x33,
+	0x6a, 0xfb, 0x82, 0x8a, 0x3c, 0xb4, 0x4e, 0xca, 0x62, 0x4f, 0xe8, 0x54, 0x98, 0xe6, 0xae, 0xc5,
+	0xa5, 0xa9, 0x10, 0x9a, 0xc4, 0x9e, 0xd0, 0xa9, 0xf1, 0x24, 0x0f, 0xb5, 0xac, 0xdc, 0xf0, 0xee,
+	0x92, 0x48, 0xd0, 0xd9, 0x0e, 0xa4, 0xa3, 0xd3, 0x5b, 0x8b, 0x24, 0x82, 0xcf, 0x4b, 0x64, 0x49,
+	0x20, 0xa9, 0x7c, 0xf3, 0xff, 0x42, 0xbe, 0x37, 0x32, 0x6d, 0x0c, 0x27, 0xa7, 0x79, 0x36, 0xe9,
+	0xb8, 0x3c, 0xb2, 0x95, 0x28, 0xd3, 0xca, 0xff, 0x25, 0x45, 0x63, 0x05, 0xea, 0x4b, 0x22, 0x34,
+	0xfe, 0x46, 0x00, 0x69, 0xbe, 0xb8, 0x0b, 0xa5, 0xb0, 0xb5, 0x51, 0x55, 0x32, 0x13, 0x2d, 0x8f,
+	0xdb, 0x35, 0xad, 0xa0, 0xb7, 0x1a, 0x15, 0xa5, 0x26, 0xa1, 0xee, 0xa1, 0xe9, 0x73, 0x1a, 0x90,
+	0x28, 0xf0, 0x3f, 0x48, 0xe9, 0x7a, 0x56, 0x0f, 0x61, 0x3d, 0xf0, 0x79, 0x3d, 0x9c, 0x57, 0xc3,
+	0xb2, 0x04, 0x0b, 0xef, 0x20, 0x41, 0xe3, 0x2b, 0x50, 0x93, 0x7c, 0x30, 0x86, 0x82, 0xa8, 0xa3,
+	0x9c, 0xbf, 0x1a, 0x91, 0xeb, 0xe5, 0xa9, 0xae, 0x45, 0x53, 0x6d, 0x74, 0xa1, 0x14, 0xa6, 0x90,
+	0xda, 0x51, 0x76, 0xea, 0xaf, 0x40, 0x2d, 0x19, 0xf2, 0x71, 0xf2, 0x70, 0x55, 0x13, 0x6c, 0x9b,
+	0x19, 0x4f, 0x11, 0x34, 0x96, 0x5b, 0xfc, 0x0e, 0xf3, 0x7f, 0x0d, 0xb0, 0x23, 0xb1, 0xf1, 0xd4,
+	0x74, 0x2c, 0xfb, 0x58, 0xf6, 0x5f, 0xde, 0xa2, 0x12, 0x2d, 0xb4, 0xdc, 0x96, 0x06, 0xd1, 0x76,
+	0x91, 0x97, 0x50, 0x87, 0x94, 0x83, 0x4a, 0xe4, 0x5a, 0x60, 0x42, 0x16, 0xf2, 0xf5, 0x50, 0x89,
+	0x5c, 0x1b, 0x77, 0xa1, 0x14, 0xde, 0xf4, 0x1e, 0xda, 0x6f, 0xfc, 0x82, 0xa0, 0x12, 0xb7, 0xec,
+	0x7d, 0x8c, 0xd3, 0x9b, 0x9f, 0x97, 0xb3, 0x85, 0x56, 0xce, 0x17, 0xfa, 0xa4, 0x08, 0x6a, 0x32,
+	0x02, 0xf8, 0x32, 0xa8, 0x13, 0x6f, 0xee, 0xf2, 0xb1, 0xe5, 0x72, 0x59, 0xe8, 0xc2, 0x66, 0x8e,
+	0x54, 0x24, 0xb4, 0xe5, 0x72, 0x7c, 0x05, 0xaa, 0xa1, 0x79, 0x6a, 0x7b, 0x26, 0x0f, 0xef, 0xda,
+	0xcc, 0x11, 0x90, 0xe0, 0x6d, 0x81, 0x61, 0x0d, 0x14, 0x36, 0x77, 0xe4, 0x4d, 0x88, 0x88, 0x25,
+	0xbe, 0x08, 0x25, 0x36, 0x99, 0x51, 0xc7, 0x94, 0x15, 0xbe, 0x40, 0xa2, 0x1d, 0xfe, 0x18, 0x1a,
+	0x3f, 0xd1, 0xc0, 0x1b, 0xf3, 0x59, 0x40, 0xd9, 0xcc, 0xb3, 0x0f, 0x65, 0xb5, 0x11, 0xa9, 0x0b,
+	0x74, 0x14, 0x83, 0xf8, 0x93, 0xc8, 0x2d, 0xe5, 0x55, 0x92, 0xbc, 0x10, 0xa9, 0x09, 0xfc, 0x56,
+	0xcc, 0xed, 0x2a, 0x68, 0x19, 0xbf, 0x90, 0x60, 0x59, 0x12, 0x44, 0xa4, 0x91, 0x78, 0x86, 0x24,
+	0xbb, 0xd0, 0x70, 0xe9, 0x91, 0xc9, 0xad, 0x47, 0x74, 0xcc, 0x7c, 0xd3, 0x65, 0xcd, 0xca, 0xd9,
+	0xd7, 0xad, 0x37, 0x9f, 0x3c, 0xa4, 0x7c, 0xe8, 0x9b, 0x6e, 0xa4, 0x8b, 0x7a, 0x1c, 0x21, 0x30,
+	0x86, 0x3f, 0x85, 0x95, 0xe4, 0x88, 0x43, 0x6a, 0x73, 0x93, 0x35, 0xd5, 0x96, 0xd2, 0xc6, 0x24,
+	0x39, 0xf9, 0x3b, 0x89, 0x2e, 0x39, 0x4a, 0x6e, 0xac, 0x09, 0x2d, 0xa5, 0x8d, 0x52, 0x47, 0x49,
+	0x4c, 0x3c, 0x2a, 0x0d, 0xdf, 0x63, 0x56, 0x86, 0x54, 0xf5, 0xed, 0xa4, 0xe2, 0x88, 0x84, 0x54,
+	0x72, 0x44, 0x44, 0xaa, 0x16, 0x92, 0x8a, 0xe1, 0x94, 0x54, 0xe2, 0x18, 0x91, 0xaa, 0x87, 0xa4,
+	0x62, 0x38, 0x22, 0x75, 0x13, 0x20, 0xa0, 0x8c, 0xf2, 0xf1, 0x4c, 0x54, 0xbe, 0x21, 0xa5, 0x77,
+	0xf9, 0x0d, 0x8f, 0xc7, 0x06, 0x11, 0x5e, 0x9b, 0x96, 0xcb, 0x89, 0x1a, 0xc4, 0xcb, 0x73, 0xf3,
+	0xb7, 0x72, 0x7e, 0xfe, 0x6e, 0x80, 0x9a, 0x84, 0xe2, 0x2a, 0x94, 0xf7, 0x06, 0x77, 0x07, 0x3b,
+	0xf7, 0x07, 0x5a, 0x0e, 0x97, 0x41, 0x79, 0xd0, 0x1f, 0x6a, 0x08, 0x97, 0x20, 0x3f, 0xd8, 0xd1,
+	0xf2, 0x58, 0x85, 0xe2, 0x9d, 0xee, 0xde, 0x9d, 0xbe, 0xa6, 0xac, 0x15, 0x9e, 0xfc, 0xaa, 0xa3,
+	0x5e, 0x19, 0x8a, 0x92, 0x7c, 0xaf, 0x06, 0x90, 0xf6, 0xde, 0xb8, 0x09, 0x90, 0x16, 0x4a, 0x8c,
+	0x9f, 0x37, 0x9d, 0x32, 0x1a, 0xce, 0xf3, 0x05, 0x12, 0xed, 0x04, 0x6e, 0x53, 0xf7, 0x88, 0xcf,
+	0xe4, 0x18, 0xd7, 0x49, 0xb4, 0xbb, 0xba, 0x0e, 0x90, 0x7e, 0x8b, 0x04, 0x89, 0xee, 0xee, 0x96,
+	0x96, 0xc3, 0x15, 0x28, 0x90, 0xbd, 0x7b, 0x7d, 0x0d, 0x5d, 0x3d, 0x96, 0x5f, 0xe5, 0xe8, 0xc5,
+	0x59, 0xa6, 0x5c, 0x85, 0xf2, 0xad, 0x9d, 0xbd, 0xc1, 0xa8, 0x4f, 0x34, 0x94, 0xd2, 0xcd, 0xe3,
+	0x3a, 0xa8, 0x9b, 0x5b, 0xc3, 0xd1, 0xce, 0x1d, 0xd2, 0xdd, 0xd6, 0x14, 0x8c, 0xa1, 0x21, 0x2d,
+	0x29, 0x56, 0x10, 0xa1, 0xc3, 0xbd, 0xed, 0xed, 0x2e, 0x79, 0xa0, 0x15, 0xc5, 0x65, 0x5b, 0x83,
+	0xdb, 0x3b, 0x5a, 0x09, 0xd7, 0xa0, 0x32, 0x1c, 0x75, 0x47, 0xfd, 0x61, 0x7f, 0xa4, 0x95, 0x7b,
+	0xdf, 0x9e, 0xbc, 0xd4, 0x73, 0xcf, 0x5f, 0xea, 0xb9, 0xd7, 0x2f, 0x75, 0xf4, 0xf3, 0x42, 0x47,
+	0xbf, 0x2d, 0x74, 0xf4, 0x6c, 0xa1, 0xa3, 0x93, 0x85, 0x8e, 0xfe, 0x5c, 0xe8, 0xe8, 0xd5, 0x42,
+	0xcf, 0xbd, 0x5e, 0xe8, 0xe8, 0xe9, 0xa9, 0x9e, 0x3b, 0x39, 0xd5, 0x73, 0xcf, 0x4f, 0xf5, 0xdc,
+	0x0f, 0xc9, 0x7f, 0xf6, 0x41, 0x49, 0xfe, 0x58, 0x7f, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff,
+	0x2f, 0x9f, 0x19, 0xc5, 0x88, 0x0b, 0x00, 0x00,
+}
+
+func (x SourceEnum) String() string {
+	s, ok := SourceEnum_name[int32(x)]
 	if ok {
 		return s
 	}
 	return strconv.Itoa(int(x))
 }
-func (x MetricMetadata_MetricType) String() string {
-	s, ok := MetricMetadata_MetricType_name[int32(x)]
+func (x MetricType) String() string {
+	s, ok := MetricType_name[int32(x)]
 	if ok {
 		return s
 	}
@@ -931,14 +1317,14 @@ func (x Histogram_ResetHint) String() string {
 	}
 	return strconv.Itoa(int(x))
 }
-func (this *WriteRequest) Equal(that interface{}) bool {
+func (this *WriteRequestV2) Equal(that interface{}) bool {
 	if that == nil {
 		return this == nil
 	}
 
-	that1, ok := that.(*WriteRequest)
+	that1, ok := that.(*WriteRequestV2)
 	if !ok {
-		that2, ok := that.(WriteRequest)
+		that2, ok := that.(WriteRequestV2)
 		if ok {
 			that1 = &that2
 		} else {
@@ -950,22 +1336,22 @@ func (this *WriteRequest) Equal(that interface{}) bool {
 	} else if this == nil {
 		return false
 	}
-	if len(this.Timeseries) != len(that1.Timeseries) {
+	if this.Source != that1.Source {
 		return false
 	}
-	for i := range this.Timeseries {
-		if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
+	if len(this.Symbols) != len(that1.Symbols) {
+		return false
+	}
+	for i := range this.Symbols {
+		if this.Symbols[i] != that1.Symbols[i] {
 			return false
 		}
 	}
-	if this.Source != that1.Source {
-		return false
-	}
-	if len(this.Metadata) != len(that1.Metadata) {
+	if len(this.Timeseries) != len(that1.Timeseries) {
 		return false
 	}
-	for i := range this.Metadata {
-		if !this.Metadata[i].Equal(that1.Metadata[i]) {
+	for i := range this.Timeseries {
+		if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
 			return false
 		}
 	}
@@ -974,14 +1360,14 @@ func (this *WriteRequest) Equal(that interface{}) bool {
 	}
 	return true
 }
-func (this *WriteResponse) Equal(that interface{}) bool {
+func (this *WriteResponseV2) Equal(that interface{}) bool {
 	if that == nil {
 		return this == nil
 	}
 
-	that1, ok := that.(*WriteResponse)
+	that1, ok := that.(*WriteResponseV2)
 	if !ok {
-		that2, ok := that.(WriteResponse)
+		that2, ok := that.(WriteResponseV2)
 		if ok {
 			that1 = &that2
 		} else {
@@ -993,8 +1379,205 @@ func (this *WriteResponse) Equal(that interface{}) bool {
 	} else if this == nil {
 		return false
 	}
-	return true
-}
+	if this.Samples != that1.Samples {
+		return false
+	}
+	if this.Histograms != that1.Histograms {
+		return false
+	}
+	if this.Exemplars != that1.Exemplars {
+		return false
+	}
+	return true
+}
+func (this *TimeSeriesV2) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*TimeSeriesV2)
+	if !ok {
+		that2, ok := that.(TimeSeriesV2)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
+		return false
+	}
+	for i := range this.LabelsRefs {
+		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
+			return false
+		}
+	}
+	if len(this.Samples) != len(that1.Samples) {
+		return false
+	}
+	for i := range this.Samples {
+		if !this.Samples[i].Equal(&that1.Samples[i]) {
+			return false
+		}
+	}
+	if len(this.Histograms) != len(that1.Histograms) {
+		return false
+	}
+	for i := range this.Histograms {
+		if !this.Histograms[i].Equal(&that1.Histograms[i]) {
+			return false
+		}
+	}
+	if len(this.Exemplars) != len(that1.Exemplars) {
+		return false
+	}
+	for i := range this.Exemplars {
+		if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
+			return false
+		}
+	}
+	if !this.Metadata.Equal(&that1.Metadata) {
+		return false
+	}
+	if this.CreatedTimestamp != that1.CreatedTimestamp {
+		return false
+	}
+	return true
+}
+func (this *ExemplarV2) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*ExemplarV2)
+	if !ok {
+		that2, ok := that.(ExemplarV2)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
+		return false
+	}
+	for i := range this.LabelsRefs {
+		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
+			return false
+		}
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if this.Timestamp != that1.Timestamp {
+		return false
+	}
+	return true
+}
+func (this *MetadataV2) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*MetadataV2)
+	if !ok {
+		that2, ok := that.(MetadataV2)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Type != that1.Type {
+		return false
+	}
+	if this.HelpRef != that1.HelpRef {
+		return false
+	}
+	if this.UnitRef != that1.UnitRef {
+		return false
+	}
+	return true
+}
+func (this *WriteRequest) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*WriteRequest)
+	if !ok {
+		that2, ok := that.(WriteRequest)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if len(this.Timeseries) != len(that1.Timeseries) {
+		return false
+	}
+	for i := range this.Timeseries {
+		if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
+			return false
+		}
+	}
+	if this.Source != that1.Source {
+		return false
+	}
+	if len(this.Metadata) != len(that1.Metadata) {
+		return false
+	}
+	for i := range this.Metadata {
+		if !this.Metadata[i].Equal(that1.Metadata[i]) {
+			return false
+		}
+	}
+	if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
+		return false
+	}
+	return true
+}
+func (this *WriteResponse) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*WriteResponse)
+	if !ok {
+		that2, ok := that.(WriteResponse)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	return true
+}
 func (this *TimeSeries) Equal(that interface{}) bool {
 	if that == nil {
 		return this == nil
@@ -1424,6 +2007,88 @@ func (this *BucketSpan) Equal(that interface{}) bool {
 	}
 	return true
 }
+func (this *WriteRequestV2) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 8)
+	s = append(s, "&cortexpb.WriteRequestV2{")
+	s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
+	s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n")
+	s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
+	s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *WriteResponseV2) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&cortexpb.WriteResponseV2{")
+	s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n")
+	s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n")
+	s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *TimeSeriesV2) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 10)
+	s = append(s, "&cortexpb.TimeSeriesV2{")
+	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
+	if this.Samples != nil {
+		vs := make([]*Sample, len(this.Samples))
+		for i := range vs {
+			vs[i] = &this.Samples[i]
+		}
+		s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
+	}
+	if this.Histograms != nil {
+		vs := make([]*Histogram, len(this.Histograms))
+		for i := range vs {
+			vs[i] = &this.Histograms[i]
+		}
+		s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
+	}
+	if this.Exemplars != nil {
+		vs := make([]*ExemplarV2, len(this.Exemplars))
+		for i := range vs {
+			vs[i] = &this.Exemplars[i]
+		}
+		s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
+	}
+	s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n")
+	s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ExemplarV2) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&cortexpb.ExemplarV2{")
+	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MetadataV2) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&cortexpb.MetadataV2{")
+	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+	s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n")
+	s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
 func (this *WriteRequest) GoString() string {
 	if this == nil {
 		return "nil"
@@ -1625,7 +2290,7 @@ func valueToGoStringCortex(v interface{}, typ string) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
+func (m *WriteRequestV2) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1635,12 +2300,12 @@ func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
+func (m *WriteRequestV2) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *WriteRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
@@ -1657,26 +2322,315 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0xc0
 	}
-	if len(m.Metadata) > 0 {
-		for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
+	if len(m.Timeseries) > 0 {
+		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
 			{
-				size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
+				size := m.Timeseries[iNdEx].Size()
+				i -= size
+				if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
 					return 0, err
 				}
-				i -= size
 				i = encodeVarintCortex(dAtA, i, uint64(size))
 			}
 			i--
-			dAtA[i] = 0x1a
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Symbols) > 0 {
+		for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Symbols[iNdEx])
+			copy(dAtA[i:], m.Symbols[iNdEx])
+			i = encodeVarintCortex(dAtA, i, uint64(len(m.Symbols[iNdEx])))
+			i--
+			dAtA[i] = 0x22
 		}
 	}
 	if m.Source != 0 {
 		i = encodeVarintCortex(dAtA, i, uint64(m.Source))
 		i--
-		dAtA[i] = 0x10
+		dAtA[i] = 0x18
 	}
-	if len(m.Timeseries) > 0 {
+	return len(dAtA) - i, nil
+}
+
+func (m *WriteResponseV2) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteResponseV2) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Exemplars != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Histograms != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Histograms))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Samples != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Samples))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TimeSeriesV2) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TimeSeriesV2) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TimeSeriesV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.CreatedTimestamp != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.CreatedTimestamp))
+		i--
+		dAtA[i] = 0x30
+	}
+	{
+		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintCortex(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Exemplars) > 0 {
+		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintCortex(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Histograms) > 0 {
+		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintCortex(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Samples) > 0 {
+		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintCortex(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.LabelsRefs) > 0 {
+		dAtA3 := make([]byte, len(m.LabelsRefs)*10)
+		var j2 int
+		for _, num := range m.LabelsRefs {
+			for num >= 1<<7 {
+				dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j2++
+			}
+			dAtA3[j2] = uint8(num)
+			j2++
+		}
+		i -= j2
+		copy(dAtA[i:], dAtA3[:j2])
+		i = encodeVarintCortex(dAtA, i, uint64(j2))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ExemplarV2) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExemplarV2) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemplarV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Timestamp))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Value != 0 {
+		i -= 8
+		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+		i--
+		dAtA[i] = 0x11
+	}
+	if len(m.LabelsRefs) > 0 {
+		dAtA5 := make([]byte, len(m.LabelsRefs)*10)
+		var j4 int
+		for _, num := range m.LabelsRefs {
+			for num >= 1<<7 {
+				dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j4++
+			}
+			dAtA5[j4] = uint8(num)
+			j4++
+		}
+		i -= j4
+		copy(dAtA[i:], dAtA5[:j4])
+		i = encodeVarintCortex(dAtA, i, uint64(j4))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MetadataV2) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MetadataV2) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetadataV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.UnitRef != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.UnitRef))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.HelpRef != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.HelpRef))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Type != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Type))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SkipLabelNameValidation {
+		i--
+		if m.SkipLabelNameValidation {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x3e
+		i--
+		dAtA[i] = 0xc0
+	}
+	if len(m.Metadata) > 0 {
+		for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintCortex(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Source != 0 {
+		i = encodeVarintCortex(dAtA, i, uint64(m.Source))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Timeseries) > 0 {
 		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
 			{
 				size := m.Timeseries[iNdEx].Size()
@@ -2032,30 +2986,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	if len(m.PositiveCounts) > 0 {
 		for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
+			f6 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
 			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1))
+			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
 		}
 		i = encodeVarintCortex(dAtA, i, uint64(len(m.PositiveCounts)*8))
 		i--
 		dAtA[i] = 0x6a
 	}
 	if len(m.PositiveDeltas) > 0 {
-		var j2 int
-		dAtA4 := make([]byte, len(m.PositiveDeltas)*10)
+		var j7 int
+		dAtA9 := make([]byte, len(m.PositiveDeltas)*10)
 		for _, num := range m.PositiveDeltas {
-			x3 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x3 >= 1<<7 {
-				dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80)
-				j2++
-				x3 >>= 7
-			}
-			dAtA4[j2] = uint8(x3)
-			j2++
+			x8 := (uint64(num) << 1) ^ uint64((num >> 63))
+			for x8 >= 1<<7 {
+				dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80)
+				j7++
+				x8 >>= 7
+			}
+			dAtA9[j7] = uint8(x8)
+			j7++
 		}
-		i -= j2
-		copy(dAtA[i:], dAtA4[:j2])
-		i = encodeVarintCortex(dAtA, i, uint64(j2))
+		i -= j7
+		copy(dAtA[i:], dAtA9[:j7])
+		i = encodeVarintCortex(dAtA, i, uint64(j7))
 		i--
 		dAtA[i] = 0x62
 	}
@@ -2075,30 +3029,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	if len(m.NegativeCounts) > 0 {
 		for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
+			f10 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
 			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
+			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f10))
 		}
 		i = encodeVarintCortex(dAtA, i, uint64(len(m.NegativeCounts)*8))
 		i--
 		dAtA[i] = 0x52
 	}
 	if len(m.NegativeDeltas) > 0 {
-		var j6 int
-		dAtA8 := make([]byte, len(m.NegativeDeltas)*10)
+		var j11 int
+		dAtA13 := make([]byte, len(m.NegativeDeltas)*10)
 		for _, num := range m.NegativeDeltas {
-			x7 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x7 >= 1<<7 {
-				dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
-				j6++
-				x7 >>= 7
-			}
-			dAtA8[j6] = uint8(x7)
-			j6++
-		}
-		i -= j6
-		copy(dAtA[i:], dAtA8[:j6])
-		i = encodeVarintCortex(dAtA, i, uint64(j6))
+			x12 := (uint64(num) << 1) ^ uint64((num >> 63))
+			for x12 >= 1<<7 {
+				dAtA13[j11] = uint8(uint64(x12)&0x7f | 0x80)
+				j11++
+				x12 >>= 7
+			}
+			dAtA13[j11] = uint8(x12)
+			j11++
+		}
+		i -= j11
+		copy(dAtA[i:], dAtA13[:j11])
+		i = encodeVarintCortex(dAtA, i, uint64(j11))
 		i--
 		dAtA[i] = 0x4a
 	}
@@ -2244,23 +3198,23 @@ func encodeVarintCortex(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	return base
 }
-func (m *WriteRequest) Size() (n int) {
+func (m *WriteRequestV2) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	if len(m.Timeseries) > 0 {
-		for _, e := range m.Timeseries {
-			l = e.Size()
-			n += 1 + l + sovCortex(uint64(l))
-		}
-	}
 	if m.Source != 0 {
 		n += 1 + sovCortex(uint64(m.Source))
 	}
-	if len(m.Metadata) > 0 {
-		for _, e := range m.Metadata {
+	if len(m.Symbols) > 0 {
+		for _, s := range m.Symbols {
+			l = len(s)
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	if len(m.Timeseries) > 0 {
+		for _, e := range m.Timeseries {
 			l = e.Size()
 			n += 1 + l + sovCortex(uint64(l))
 		}
@@ -2271,25 +3225,149 @@ func (m *WriteRequest) Size() (n int) {
 	return n
 }
 
-func (m *WriteResponse) Size() (n int) {
+func (m *WriteResponseV2) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
+	if m.Samples != 0 {
+		n += 1 + sovCortex(uint64(m.Samples))
+	}
+	if m.Histograms != 0 {
+		n += 1 + sovCortex(uint64(m.Histograms))
+	}
+	if m.Exemplars != 0 {
+		n += 1 + sovCortex(uint64(m.Exemplars))
+	}
 	return n
 }
 
-func (m *TimeSeries) Size() (n int) {
+func (m *TimeSeriesV2) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	if len(m.Labels) > 0 {
-		for _, e := range m.Labels {
-			l = e.Size()
-			n += 1 + l + sovCortex(uint64(l))
+	if len(m.LabelsRefs) > 0 {
+		l = 0
+		for _, e := range m.LabelsRefs {
+			l += sovCortex(uint64(e))
+		}
+		n += 1 + sovCortex(uint64(l)) + l
+	}
+	if len(m.Samples) > 0 {
+		for _, e := range m.Samples {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	if len(m.Histograms) > 0 {
+		for _, e := range m.Histograms {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	if len(m.Exemplars) > 0 {
+		for _, e := range m.Exemplars {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	l = m.Metadata.Size()
+	n += 1 + l + sovCortex(uint64(l))
+	if m.CreatedTimestamp != 0 {
+		n += 1 + sovCortex(uint64(m.CreatedTimestamp))
+	}
+	return n
+}
+
+func (m *ExemplarV2) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.LabelsRefs) > 0 {
+		l = 0
+		for _, e := range m.LabelsRefs {
+			l += sovCortex(uint64(e))
+		}
+		n += 1 + sovCortex(uint64(l)) + l
+	}
+	if m.Value != 0 {
+		n += 9
+	}
+	if m.Timestamp != 0 {
+		n += 1 + sovCortex(uint64(m.Timestamp))
+	}
+	return n
+}
+
+func (m *MetadataV2) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Type != 0 {
+		n += 1 + sovCortex(uint64(m.Type))
+	}
+	if m.HelpRef != 0 {
+		n += 1 + sovCortex(uint64(m.HelpRef))
+	}
+	if m.UnitRef != 0 {
+		n += 1 + sovCortex(uint64(m.UnitRef))
+	}
+	return n
+}
+
+func (m *WriteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Timeseries) > 0 {
+		for _, e := range m.Timeseries {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	if m.Source != 0 {
+		n += 1 + sovCortex(uint64(m.Source))
+	}
+	if len(m.Metadata) > 0 {
+		for _, e := range m.Metadata {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
+		}
+	}
+	if m.SkipLabelNameValidation {
+		n += 3
+	}
+	return n
+}
+
+func (m *WriteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *TimeSeries) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Labels) > 0 {
+		for _, e := range m.Labels {
+			l = e.Size()
+			n += 1 + l + sovCortex(uint64(l))
 		}
 	}
 	if len(m.Samples) > 0 {
@@ -2524,6 +3602,85 @@ func sovCortex(x uint64) (n int) {
 func sozCortex(x uint64) (n int) {
 	return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
+func (this *WriteRequestV2) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WriteRequestV2{`,
+		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+		`Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`,
+		`Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
+		`SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WriteResponseV2) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WriteResponseV2{`,
+		`Samples:` + fmt.Sprintf("%v", this.Samples) + `,`,
+		`Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`,
+		`Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TimeSeriesV2) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSamples := "[]Sample{"
+	for _, f := range this.Samples {
+		repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSamples += "}"
+	repeatedStringForHistograms := "[]Histogram{"
+	for _, f := range this.Histograms {
+		repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForHistograms += "}"
+	repeatedStringForExemplars := "[]ExemplarV2{"
+	for _, f := range this.Exemplars {
+		repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "ExemplarV2", "ExemplarV2", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForExemplars += "}"
+	s := strings.Join([]string{`&TimeSeriesV2{`,
+		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
+		`Samples:` + repeatedStringForSamples + `,`,
+		`Histograms:` + repeatedStringForHistograms + `,`,
+		`Exemplars:` + repeatedStringForExemplars + `,`,
+		`Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "MetadataV2", "MetadataV2", 1), `&`, ``, 1) + `,`,
+		`CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExemplarV2) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExemplarV2{`,
+		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MetadataV2) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MetadataV2{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`,
+		`UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *WriteRequest) String() string {
 	if this == nil {
 		return "nil"
@@ -2727,6 +3884,826 @@ func valueToStringCortex(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
+func (m *WriteRequestV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WriteRequestV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WriteRequestV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			m.Source = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Source |= SourceEnum(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{})
+			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1000:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SkipLabelNameValidation = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WriteResponseV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WriteResponseV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WriteResponseV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+			}
+			m.Samples = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Samples |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+			}
+			m.Histograms = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Histograms |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+			}
+			m.Exemplars = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Exemplars |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TimeSeriesV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TimeSeriesV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TimeSeriesV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v uint32
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortex
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint32(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.LabelsRefs = append(m.LabelsRefs, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortex
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortex
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortex
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.LabelsRefs) == 0 {
+					m.LabelsRefs = make([]uint32, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint32
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowCortex
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint32(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.LabelsRefs = append(m.LabelsRefs, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Samples = append(m.Samples, Sample{})
+			if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Histograms = append(m.Histograms, Histogram{})
+			if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Exemplars = append(m.Exemplars, ExemplarV2{})
+			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
+			}
+			m.CreatedTimestamp = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CreatedTimestamp |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExemplarV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExemplarV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExemplarV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v uint32
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortex
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint32(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.LabelsRefs = append(m.LabelsRefs, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowCortex
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthCortex
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthCortex
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.LabelsRefs) == 0 {
+					m.LabelsRefs = make([]uint32, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint32
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowCortex
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint32(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.LabelsRefs = append(m.LabelsRefs, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+			}
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = float64(math.Float64frombits(v))
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+			}
+			m.Timestamp = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Timestamp |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MetadataV2) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MetadataV2: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MetadataV2: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= MetricType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType)
+			}
+			m.HelpRef = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HelpRef |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType)
+			}
+			m.UnitRef = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.UnitRef |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -2804,7 +4781,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
+				m.Source |= SourceEnum(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3376,7 +5353,7 @@ func (m *MetricMetadata) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Type |= MetricMetadata_MetricType(b&0x7F) << shift
+				m.Type |= MetricType(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto
index cedb173183..b16074c007 100644
--- a/pkg/cortexpb/cortex.proto
+++ b/pkg/cortexpb/cortex.proto
@@ -9,12 +9,110 @@ import "gogoproto/gogo.proto";
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
+enum SourceEnum {
+  API = 0;
+  RULE = 1;
+}
+
+enum MetricType {
+  UNKNOWN        = 0;
+  COUNTER        = 1;
+  GAUGE          = 2;
+  HISTOGRAM      = 3;
+  GAUGEHISTOGRAM = 4;
+  SUMMARY        = 5;
+  INFO           = 6;
+  STATESET       = 7;
+}
+
+// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
+message WriteRequestV2 {
+  reserved 1 to 2;
+  cortexpb.SourceEnum Source = 3;
+  repeated string symbols = 4;
+  repeated TimeSeriesV2 timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"];
+
+  bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus
+}
+
+message WriteResponseV2 {
+  // Samples represents X-Prometheus-Remote-Write-Samples-Written
+  int64 Samples = 1;
+  // Histograms represents X-Prometheus-Remote-Write-Histograms-Written
+  int64 Histograms = 2;
+  // Exemplars represents X-Prometheus-Remote-Write-Exemplars-Written
+  int64 Exemplars = 3;
+}
+
+message TimeSeriesV2 {
+  repeated uint32 labels_refs = 1;
+  // Timeseries messages can either specify samples or (native) histogram samples
+  // (histogram field), but not both. For a typical sender (real-time metric
+  // streaming), in healthy cases, there will be only one sample or histogram.
+  //
+  // Samples and histograms are sorted by timestamp (older first).
+  repeated Sample samples = 2 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
+
+  // exemplars represents an optional set of exemplars attached to this series' samples.
+  repeated ExemplarV2 exemplars = 4 [(gogoproto.nullable) = false];
+
+  // metadata represents the metadata associated with the given series' samples.
+  MetadataV2 metadata = 5 [(gogoproto.nullable) = false];
+
+  // created_timestamp represents an optional created timestamp associated with
+  // this series' samples in ms format, typically for counter or histogram type
+  // metrics. Created timestamp represents the time when the counter started
+  // counting (sometimes referred to as start timestamp), which can increase
+  // the accuracy of query results.
+  //
+  // Note that some receivers might require this and in return fail to
+  // ingest such samples within the Request.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  //
+  // Note that the "optional" keyword is omitted due to
+  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+  // Zero value means value not set. If you need to use exactly zero value for
+  // the timestamp, use 1 millisecond before or after.
+  int64 created_timestamp = 6;
+}
+
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+message ExemplarV2 {
+  // labels_refs is an optional list of label name-value pair references, encoded
+  // as indices to the Request.symbols array. This list's len is always
+  // a multiple of 2, and the underlying labels should be sorted lexicographically.
+  // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+  repeated uint32 labels_refs = 1;
+  // value represents an exact example value. This can be useful when the exemplar
+  // is attached to a histogram, which only gives an estimated value through buckets.
+  double value = 2;
+  // timestamp represents the timestamp of the exemplar in ms.
+  //
+  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+  // for conversion from/to time.Time to Prometheus timestamp.
+  int64 timestamp = 3;
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+message MetadataV2 {
+  MetricType type = 1;
+  // help_ref is a reference to the Request.symbols array representing help
+  // text for the metric. Help is optional, reference should point to an empty string in
+  // such a case.
+  uint32 help_ref = 3;
+  // unit_ref is a reference to the Request.symbols array representing a unit
+  // for the metric. Unit is optional, reference should point to an empty string in
+  // such a case.
+  uint32 unit_ref = 4;
+}
+
 message WriteRequest {
   repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"];
-  enum SourceEnum {
-    API = 0;
-    RULE = 1;
-  }
   SourceEnum Source = 2;
   repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true];
 
@@ -42,17 +140,6 @@ message Sample {
 }
 
 message MetricMetadata {
-  enum MetricType {
-    UNKNOWN        = 0;
-    COUNTER        = 1;
-    GAUGE          = 2;
-    HISTOGRAM      = 3;
-    GAUGEHISTOGRAM = 4;
-    SUMMARY        = 5;
-    INFO           = 6;
-    STATESET       = 7;
-  }
-
   MetricType type = 1;
   string metric_family_name = 2;
   string help = 4;
diff --git a/pkg/cortexpbv2/timeseriesv2.go b/pkg/cortexpb/timeseriesv2.go
similarity index 66%
rename from pkg/cortexpbv2/timeseriesv2.go
rename to pkg/cortexpb/timeseriesv2.go
index 684e4bfac0..28eca0f973 100644
--- a/pkg/cortexpbv2/timeseriesv2.go
+++ b/pkg/cortexpb/timeseriesv2.go
@@ -1,59 +1,52 @@
-package cortexpbv2
+package cortexpb
 
 import (
 	"sync"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
 )
 
 var (
-	expectedTimeseries          = 100
-	expectedLabels              = 20
-	expectedSymbols             = 20
-	expectedSamplesPerSeries    = 10
-	expectedExemplarsPerSeries  = 1
-	expectedHistogramsPerSeries = 1
-
-	slicePool = sync.Pool{
+	expectedSymbols = 20
+
+	slicePoolV2 = sync.Pool{
 		New: func() interface{} {
 			return make([]PreallocTimeseriesV2, 0, expectedTimeseries)
 		},
 	}
 
-	timeSeriesPool = sync.Pool{
+	timeSeriesPoolV2 = sync.Pool{
 		New: func() interface{} {
-			return &TimeSeries{
+			return &TimeSeriesV2{
 				LabelsRefs: make([]uint32, 0, expectedLabels),
 				Samples:    make([]Sample, 0, expectedSamplesPerSeries),
 				Histograms: make([]Histogram, 0, expectedHistogramsPerSeries),
-				Exemplars:  make([]Exemplar, 0, expectedExemplarsPerSeries),
-				Metadata:   Metadata{},
+				Exemplars:  make([]ExemplarV2, 0, expectedExemplarsPerSeries),
+				Metadata:   MetadataV2{},
 			}
 		},
 	}
 
-	writeRequestPool = sync.Pool{
+	writeRequestPoolV2 = sync.Pool{
 		New: func() interface{} {
 			return &PreallocWriteRequestV2{
-				WriteRequest: WriteRequest{
+				WriteRequestV2: WriteRequestV2{
 					Symbols: make([]string, 0, expectedSymbols),
 				},
 			}
 		},
 	}
-	bytePool = cortexpb.NewSlicePool(20)
+	bytePoolV2 = NewSlicePool(20)
 )
 
 // PreallocWriteRequestV2 is a WriteRequest which preallocs slices on Unmarshal.
 type PreallocWriteRequestV2 struct {
-	WriteRequest
+	WriteRequestV2
 	data *[]byte
 }
 
 // Unmarshal implements proto.Message.
 func (p *PreallocWriteRequestV2) Unmarshal(dAtA []byte) error {
 	p.Timeseries = PreallocTimeseriesV2SliceFromPool()
-	return p.WriteRequest.Unmarshal(dAtA)
+	return p.WriteRequestV2.Unmarshal(dAtA)
 }
 
 func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) {
@@ -69,53 +62,53 @@ func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) {
 
 // PreallocTimeseriesV2 is a TimeSeries which preallocs slices on Unmarshal.
 type PreallocTimeseriesV2 struct {
-	*TimeSeries
+	*TimeSeriesV2
 }
 
 // Unmarshal implements proto.Message.
 func (p *PreallocTimeseriesV2) Unmarshal(dAtA []byte) error {
-	p.TimeSeries = TimeseriesV2FromPool()
-	return p.TimeSeries.Unmarshal(dAtA)
+	p.TimeSeriesV2 = TimeseriesV2FromPool()
+	return p.TimeSeriesV2.Unmarshal(dAtA)
 }
 
 func ReuseWriteRequestV2(req *PreallocWriteRequestV2) {
 	if req.data != nil {
-		bytePool.ReuseSlice(req.data)
+		bytePoolV2.ReuseSlice(req.data)
 		req.data = nil
 	}
 	req.Source = 0
 	req.Symbols = nil
 	req.Timeseries = nil
-	writeRequestPool.Put(req)
+	writeRequestPoolV2.Put(req)
 }
 
 func PreallocWriteRequestV2FromPool() *PreallocWriteRequestV2 {
-	return writeRequestPool.Get().(*PreallocWriteRequestV2)
+	return writeRequestPoolV2.Get().(*PreallocWriteRequestV2)
 }
 
 // PreallocTimeseriesV2SliceFromPool retrieves a slice of PreallocTimeseriesV2 from a sync.Pool.
 // ReuseSlice should be called once done.
 func PreallocTimeseriesV2SliceFromPool() []PreallocTimeseriesV2 {
-	return slicePool.Get().([]PreallocTimeseriesV2)
+	return slicePoolV2.Get().([]PreallocTimeseriesV2)
 }
 
 // ReuseSlice puts the slice back into a sync.Pool for reuse.
-func ReuseSlice(ts []PreallocTimeseriesV2) {
+func ReuseSliceV2(ts []PreallocTimeseriesV2) {
 	for i := range ts {
-		ReuseTimeseries(ts[i].TimeSeries)
+		ReuseTimeseriesV2(ts[i].TimeSeriesV2)
 	}
 
-	slicePool.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details
+	slicePoolV2.Put(ts[:0]) //nolint:staticcheck //see comment on slicePoolV2 for more details
 }
 
 // TimeseriesV2FromPool retrieves a pointer to a TimeSeries from a sync.Pool.
 // ReuseTimeseries should be called once done, unless ReuseSlice was called on the slice that contains this TimeSeries.
-func TimeseriesV2FromPool() *TimeSeries {
-	return timeSeriesPool.Get().(*TimeSeries)
+func TimeseriesV2FromPool() *TimeSeriesV2 {
+	return timeSeriesPoolV2.Get().(*TimeSeriesV2)
 }
 
 // ReuseTimeseries puts the timeseries back into a sync.Pool for reuse.
-func ReuseTimeseries(ts *TimeSeries) {
+func ReuseTimeseriesV2(ts *TimeSeriesV2) {
 	// clear ts lableRef and samples
 	ts.LabelsRefs = ts.LabelsRefs[:0]
 	ts.Samples = ts.Samples[:0]
@@ -131,6 +124,6 @@ func ReuseTimeseries(ts *TimeSeries) {
 
 	ts.Exemplars = ts.Exemplars[:0]
 	ts.Histograms = ts.Histograms[:0]
-	ts.Metadata = Metadata{}
-	timeSeriesPool.Put(ts)
+	ts.Metadata = MetadataV2{}
+	timeSeriesPoolV2.Put(ts)
 }
diff --git a/pkg/cortexpbv2/timeseriesv2_test.go b/pkg/cortexpb/timeseriesv2_test.go
similarity index 82%
rename from pkg/cortexpbv2/timeseriesv2_test.go
rename to pkg/cortexpb/timeseriesv2_test.go
index d10527564e..1a0c007f6f 100644
--- a/pkg/cortexpbv2/timeseriesv2_test.go
+++ b/pkg/cortexpb/timeseriesv2_test.go
@@ -1,4 +1,4 @@
-package cortexpbv2
+package cortexpb
 
 import (
 	"fmt"
@@ -19,8 +19,8 @@ func TestPreallocTimeseriesV2SliceFromPool(t *testing.T) {
 
 	t.Run("instance is cleaned before reusing", func(t *testing.T) {
 		slice := PreallocTimeseriesV2SliceFromPool()
-		slice = append(slice, PreallocTimeseriesV2{TimeSeries: &TimeSeries{}})
-		ReuseSlice(slice)
+		slice = append(slice, PreallocTimeseriesV2{TimeSeriesV2: &TimeSeriesV2{}})
+		ReuseSliceV2(slice)
 
 		reused := PreallocTimeseriesV2SliceFromPool()
 		assert.Len(t, reused, 0)
@@ -38,11 +38,11 @@ func TestTimeseriesV2FromPool(t *testing.T) {
 	t.Run("instance is cleaned before reusing", func(t *testing.T) {
 		ts := TimeseriesV2FromPool()
 		ts.LabelsRefs = []uint32{1, 2}
-		ts.Samples = []Sample{{Value: 1, Timestamp: 2}}
-		ts.Exemplars = []Exemplar{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}}
+		ts.Samples = []Sample{{Value: 1, TimestampMs: 2}}
+		ts.Exemplars = []ExemplarV2{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}}
 		ts.Histograms = []Histogram{{}}
 		fmt.Println("ts.Histograms", len(ts.Histograms))
-		ReuseTimeseries(ts)
+		ReuseTimeseriesV2(ts)
 
 		reused := TimeseriesV2FromPool()
 		assert.Len(t, reused.LabelsRefs, 0)
@@ -52,13 +52,13 @@ func TestTimeseriesV2FromPool(t *testing.T) {
 	})
 }
 
-func BenchmarkMarshallWriteRequest(b *testing.B) {
+func BenchmarkMarshallWriteRequestV2(b *testing.B) {
 	ts := PreallocTimeseriesV2SliceFromPool()
 
 	for i := 0; i < 100; i++ {
-		ts = append(ts, PreallocTimeseriesV2{TimeSeries: TimeseriesV2FromPool()})
+		ts = append(ts, PreallocTimeseriesV2{TimeSeriesV2: TimeseriesV2FromPool()})
 		ts[i].LabelsRefs = []uint32{1, 2, 3, 4, 5, 6, 7, 8}
-		ts[i].Samples = []Sample{{Value: 1, Timestamp: 2}}
+		ts[i].Samples = []Sample{{Value: 1, TimestampMs: 2}}
 	}
 
 	tests := []struct {
@@ -69,7 +69,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) {
 		{
 			name: "no-pool",
 			writeRequestFactory: func() proto.Marshaler {
-				return &WriteRequest{Timeseries: ts}
+				return &WriteRequestV2{Timeseries: ts}
 			},
 			clean: func(in interface{}) {},
 		},
diff --git a/pkg/cortexpbv2/codecv2.go b/pkg/cortexpbv2/codecv2.go
deleted file mode 100644
index 9fe7b434e0..0000000000
--- a/pkg/cortexpbv2/codecv2.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package cortexpbv2
-
-import (
-	"github.com/prometheus/prometheus/model/histogram"
-	"github.com/prometheus/prometheus/model/labels"
-
-	"github.com/cortexproject/cortex/pkg/cortexpb"
-)
-
-// ToLabels return model labels.Labels from timeseries' remote labels.
-func (t TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
-	return desymbolizeLabels(b, t.GetLabelsRefs(), symbols)
-}
-
-// ToLabels return model labels.Labels from exemplar remote labels.
-func (e Exemplar) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
-	return desymbolizeLabels(b, e.GetLabelsRefs(), symbols)
-}
-
-func (m Metadata) ToV1Metadata(name string, symbols []string) *cortexpb.MetricMetadata {
-	typ := cortexpb.UNKNOWN
-
-	switch m.Type {
-	case METRIC_TYPE_COUNTER:
-		typ = cortexpb.COUNTER
-	case METRIC_TYPE_GAUGE:
-		typ = cortexpb.GAUGE
-	case METRIC_TYPE_HISTOGRAM:
-		typ = cortexpb.HISTOGRAM
-	case METRIC_TYPE_GAUGEHISTOGRAM:
-		typ = cortexpb.GAUGEHISTOGRAM
-	case METRIC_TYPE_SUMMARY:
-		typ = cortexpb.SUMMARY
-	case METRIC_TYPE_INFO:
-		typ = cortexpb.INFO
-	case METRIC_TYPE_STATESET:
-		typ = cortexpb.STATESET
-	}
-
-	return &cortexpb.MetricMetadata{
-		Type:             typ,
-		MetricFamilyName: name,
-		Unit:             symbols[m.UnitRef],
-		Help:             symbols[m.HelpRef],
-	}
-}
-
-// desymbolizeLabels decodes label references, with given symbols to labels.
-func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
-	b.Reset()
-	for i := 0; i < len(labelRefs); i += 2 {
-		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
-	}
-	b.Sort()
-	return b.Labels()
-}
-
-// IsFloatHistogram returns true if the histogram is float.
-func (h Histogram) IsFloatHistogram() bool {
-	_, ok := h.GetCount().(*Histogram_CountFloat)
-	return ok
-}
-
-// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the provided proto message.
-func FloatHistogramProtoToFloatHistogram(h Histogram) *histogram.FloatHistogram {
-	if !h.IsFloatHistogram() {
-		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
-	}
-
-	return &histogram.FloatHistogram{
-		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
-		Schema:           h.Schema,
-		ZeroThreshold:    h.ZeroThreshold,
-		ZeroCount:        h.GetZeroCountFloat(),
-		Count:            h.GetCountFloat(),
-		Sum:              h.Sum,
-		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
-		PositiveBuckets:  h.GetPositiveCounts(),
-		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
-		NegativeBuckets:  h.GetNegativeCounts(),
-		CustomValues:     h.GetCustomValues(),
-	}
-}
-
-func HistogramProtoToHistogram(h Histogram) *histogram.Histogram {
-	if h.IsFloatHistogram() {
-		panic("HistogramProtoToHistogram called with a float histogram")
-	}
-
-	return &histogram.Histogram{
-		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
-		Schema:           h.Schema,
-		ZeroThreshold:    h.ZeroThreshold,
-		ZeroCount:        h.GetZeroCountInt(),
-		Count:            h.GetCountInt(),
-		Sum:              h.Sum,
-		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
-		PositiveBuckets:  h.GetPositiveDeltas(),
-		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
-		NegativeBuckets:  h.GetNegativeDeltas(),
-		CustomValues:     h.GetCustomValues(),
-	}
-}
-
-// HistogramToHistogramProto converts a (normal integer) Histogram to its protobuf message type.
-// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723
-func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogram {
-	return Histogram{
-		Count:          &Histogram_CountInt{CountInt: h.Count},
-		Sum:            h.Sum,
-		Schema:         h.Schema,
-		ZeroThreshold:  h.ZeroThreshold,
-		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
-		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
-		NegativeDeltas: h.NegativeBuckets,
-		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
-		PositiveDeltas: h.PositiveBuckets,
-		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
-		Timestamp:      timestamp,
-		CustomValues:   h.CustomValues,
-	}
-}
-
-// FloatHistogramToHistogramProto converts a float Histogram to a normal
-// Histogram's protobuf message type.
-// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739
-func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram {
-	return Histogram{
-		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
-		Sum:            fh.Sum,
-		Schema:         fh.Schema,
-		ZeroThreshold:  fh.ZeroThreshold,
-		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
-		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
-		NegativeCounts: fh.NegativeBuckets,
-		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
-		PositiveCounts: fh.PositiveBuckets,
-		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
-		Timestamp:      timestamp,
-		CustomValues:   fh.CustomValues,
-	}
-}
-
-func spansProtoToSpans(s []BucketSpan) []histogram.Span {
-	spans := make([]histogram.Span, len(s))
-	for i := 0; i < len(s); i++ {
-		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
-	}
-
-	return spans
-}
-
-func spansToSpansProto(s []histogram.Span) []BucketSpan {
-	spans := make([]BucketSpan, len(s))
-	for i := 0; i < len(s); i++ {
-		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
-	}
-
-	return spans
-}
diff --git a/pkg/cortexpbv2/cortexv2.pb.go b/pkg/cortexpbv2/cortexv2.pb.go
deleted file mode 100644
index 456977d170..0000000000
--- a/pkg/cortexpbv2/cortexv2.pb.go
+++ /dev/null
@@ -1,4340 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: cortexv2.proto
-
-package cortexpbv2
-
-import (
-	encoding_binary "encoding/binary"
-	fmt "fmt"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strconv "strconv"
-	strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type WriteRequest_SourceEnum int32
-
-const (
-	API  WriteRequest_SourceEnum = 0
-	RULE WriteRequest_SourceEnum = 1
-)
-
-var WriteRequest_SourceEnum_name = map[int32]string{
-	0: "API",
-	1: "RULE",
-}
-
-var WriteRequest_SourceEnum_value = map[string]int32{
-	"API":  0,
-	"RULE": 1,
-}
-
-func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{0, 0}
-}
-
-type Metadata_MetricType int32
-
-const (
-	METRIC_TYPE_UNSPECIFIED    Metadata_MetricType = 0
-	METRIC_TYPE_COUNTER        Metadata_MetricType = 1
-	METRIC_TYPE_GAUGE          Metadata_MetricType = 2
-	METRIC_TYPE_HISTOGRAM      Metadata_MetricType = 3
-	METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4
-	METRIC_TYPE_SUMMARY        Metadata_MetricType = 5
-	METRIC_TYPE_INFO           Metadata_MetricType = 6
-	METRIC_TYPE_STATESET       Metadata_MetricType = 7
-)
-
-var Metadata_MetricType_name = map[int32]string{
-	0: "METRIC_TYPE_UNSPECIFIED",
-	1: "METRIC_TYPE_COUNTER",
-	2: "METRIC_TYPE_GAUGE",
-	3: "METRIC_TYPE_HISTOGRAM",
-	4: "METRIC_TYPE_GAUGEHISTOGRAM",
-	5: "METRIC_TYPE_SUMMARY",
-	6: "METRIC_TYPE_INFO",
-	7: "METRIC_TYPE_STATESET",
-}
-
-var Metadata_MetricType_value = map[string]int32{
-	"METRIC_TYPE_UNSPECIFIED":    0,
-	"METRIC_TYPE_COUNTER":        1,
-	"METRIC_TYPE_GAUGE":          2,
-	"METRIC_TYPE_HISTOGRAM":      3,
-	"METRIC_TYPE_GAUGEHISTOGRAM": 4,
-	"METRIC_TYPE_SUMMARY":        5,
-	"METRIC_TYPE_INFO":           6,
-	"METRIC_TYPE_STATESET":       7,
-}
-
-func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{5, 0}
-}
-
-type Histogram_ResetHint int32
-
-const (
-	RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0
-	RESET_HINT_YES         Histogram_ResetHint = 1
-	RESET_HINT_NO          Histogram_ResetHint = 2
-	RESET_HINT_GAUGE       Histogram_ResetHint = 3
-)
-
-var Histogram_ResetHint_name = map[int32]string{
-	0: "RESET_HINT_UNSPECIFIED",
-	1: "RESET_HINT_YES",
-	2: "RESET_HINT_NO",
-	3: "RESET_HINT_GAUGE",
-}
-
-var Histogram_ResetHint_value = map[string]int32{
-	"RESET_HINT_UNSPECIFIED": 0,
-	"RESET_HINT_YES":         1,
-	"RESET_HINT_NO":          2,
-	"RESET_HINT_GAUGE":       3,
-}
-
-func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{6, 0}
-}
-
-// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
-type WriteRequest struct {
-	Source                  WriteRequest_SourceEnum `protobuf:"varint,3,opt,name=Source,proto3,enum=cortexpbv2.WriteRequest_SourceEnum" json:"Source,omitempty"`
-	Symbols                 []string                `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
-	Timeseries              []PreallocTimeseriesV2  `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"`
-	SkipLabelNameValidation bool                    `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
-}
-
-func (m *WriteRequest) Reset()      { *m = WriteRequest{} }
-func (*WriteRequest) ProtoMessage() {}
-func (*WriteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{0}
-}
-func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WriteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WriteRequest.Merge(m, src)
-}
-func (m *WriteRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WriteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WriteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WriteRequest proto.InternalMessageInfo
-
-func (m *WriteRequest) GetSource() WriteRequest_SourceEnum {
-	if m != nil {
-		return m.Source
-	}
-	return API
-}
-
-func (m *WriteRequest) GetSymbols() []string {
-	if m != nil {
-		return m.Symbols
-	}
-	return nil
-}
-
-func (m *WriteRequest) GetSkipLabelNameValidation() bool {
-	if m != nil {
-		return m.SkipLabelNameValidation
-	}
-	return false
-}
-
-type WriteResponse struct {
-	// Samples represents X-Prometheus-Remote-Write-Written-Samples
-	Samples int64 `protobuf:"varint,1,opt,name=Samples,proto3" json:"Samples,omitempty"`
-	// Histograms represents X-Prometheus-Remote-Write-Written-Histograms
-	Histograms int64 `protobuf:"varint,2,opt,name=Histograms,proto3" json:"Histograms,omitempty"`
-	// Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
-	Exemplars int64 `protobuf:"varint,3,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"`
-}
-
-func (m *WriteResponse) Reset()      { *m = WriteResponse{} }
-func (*WriteResponse) ProtoMessage() {}
-func (*WriteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{1}
-}
-func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WriteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WriteResponse.Merge(m, src)
-}
-func (m *WriteResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *WriteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_WriteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
-
-func (m *WriteResponse) GetSamples() int64 {
-	if m != nil {
-		return m.Samples
-	}
-	return 0
-}
-
-func (m *WriteResponse) GetHistograms() int64 {
-	if m != nil {
-		return m.Histograms
-	}
-	return 0
-}
-
-func (m *WriteResponse) GetExemplars() int64 {
-	if m != nil {
-		return m.Exemplars
-	}
-	return 0
-}
-
-type TimeSeries struct {
-	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
-	// Timeseries messages can either specify samples or (native) histogram samples
-	// (histogram field), but not both. For a typical sender (real-time metric
-	// streaming), in healthy cases, there will be only one sample or histogram.
-	//
-	// Samples and histograms are sorted by timestamp (older first).
-	Samples    []Sample    `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
-	Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
-	// exemplars represents an optional set of exemplars attached to this series' samples.
-	Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
-	// metadata represents the metadata associated with the given series' samples.
-	Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
-	// created_timestamp represents an optional created timestamp associated with
-	// this series' samples in ms format, typically for counter or histogram type
-	// metrics. Created timestamp represents the time when the counter started
-	// counting (sometimes referred to as start timestamp), which can increase
-	// the accuracy of query results.
-	//
-	// Note that some receivers might require this and in return fail to
-	// ingest such samples within the Request.
-	//
-	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-	// for conversion from/to time.Time to Prometheus timestamp.
-	//
-	// Note that the "optional" keyword is omitted due to
-	// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-	// Zero value means value not set. If you need to use exactly zero value for
-	// the timestamp, use 1 millisecond before or after.
-	CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
-}
-
-func (m *TimeSeries) Reset()      { *m = TimeSeries{} }
-func (*TimeSeries) ProtoMessage() {}
-func (*TimeSeries) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{2}
-}
-func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *TimeSeries) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TimeSeries.Merge(m, src)
-}
-func (m *TimeSeries) XXX_Size() int {
-	return m.Size()
-}
-func (m *TimeSeries) XXX_DiscardUnknown() {
-	xxx_messageInfo_TimeSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
-
-func (m *TimeSeries) GetLabelsRefs() []uint32 {
-	if m != nil {
-		return m.LabelsRefs
-	}
-	return nil
-}
-
-func (m *TimeSeries) GetSamples() []Sample {
-	if m != nil {
-		return m.Samples
-	}
-	return nil
-}
-
-func (m *TimeSeries) GetHistograms() []Histogram {
-	if m != nil {
-		return m.Histograms
-	}
-	return nil
-}
-
-func (m *TimeSeries) GetExemplars() []Exemplar {
-	if m != nil {
-		return m.Exemplars
-	}
-	return nil
-}
-
-func (m *TimeSeries) GetMetadata() Metadata {
-	if m != nil {
-		return m.Metadata
-	}
-	return Metadata{}
-}
-
-func (m *TimeSeries) GetCreatedTimestamp() int64 {
-	if m != nil {
-		return m.CreatedTimestamp
-	}
-	return 0
-}
-
-// Exemplar is an additional information attached to some series' samples.
-// It is typically used to attach an example trace or request ID associated with
-// the metric changes.
-type Exemplar struct {
-	// labels_refs is an optional list of label name-value pair references, encoded
-	// as indices to the Request.symbols array. This list's len is always
-	// a multiple of 2, and the underlying labels should be sorted lexicographically.
-	// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
-	LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
-	// value represents an exact example value. This can be useful when the exemplar
-	// is attached to a histogram, which only gives an estimated value through buckets.
-	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-	// timestamp represents the timestamp of the exemplar in ms.
-	//
-	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-	// for conversion from/to time.Time to Prometheus timestamp.
-	Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
-}
-
-func (m *Exemplar) Reset()      { *m = Exemplar{} }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{3}
-}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
-	return m.Size()
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
-	xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-func (m *Exemplar) GetLabelsRefs() []uint32 {
-	if m != nil {
-		return m.LabelsRefs
-	}
-	return nil
-}
-
-func (m *Exemplar) GetValue() float64 {
-	if m != nil {
-		return m.Value
-	}
-	return 0
-}
-
-func (m *Exemplar) GetTimestamp() int64 {
-	if m != nil {
-		return m.Timestamp
-	}
-	return 0
-}
-
-// Sample represents series sample.
-type Sample struct {
-	// value of the sample.
-	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
-	// timestamp represents timestamp of the sample in ms.
-	//
-	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-	// for conversion from/to time.Time to Prometheus timestamp.
-	Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
-}
-
-func (m *Sample) Reset()      { *m = Sample{} }
-func (*Sample) ProtoMessage() {}
-func (*Sample) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{4}
-}
-func (m *Sample) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Sample) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Sample.Merge(m, src)
-}
-func (m *Sample) XXX_Size() int {
-	return m.Size()
-}
-func (m *Sample) XXX_DiscardUnknown() {
-	xxx_messageInfo_Sample.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Sample proto.InternalMessageInfo
-
-func (m *Sample) GetValue() float64 {
-	if m != nil {
-		return m.Value
-	}
-	return 0
-}
-
-func (m *Sample) GetTimestamp() int64 {
-	if m != nil {
-		return m.Timestamp
-	}
-	return 0
-}
-
-// Metadata represents the metadata associated with the given series' samples.
-type Metadata struct {
-	Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpbv2.Metadata_MetricType" json:"type,omitempty"`
-	// help_ref is a reference to the Request.symbols array representing help
-	// text for the metric. Help is optional, reference should point to an empty string in
-	// such a case.
-	HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
-	// unit_ref is a reference to the Request.symbols array representing a unit
-	// for the metric. Unit is optional, reference should point to an empty string in
-	// such a case.
-	UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
-}
-
-func (m *Metadata) Reset()      { *m = Metadata{} }
-func (*Metadata) ProtoMessage() {}
-func (*Metadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Metadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metadata.Merge(m, src)
-}
-func (m *Metadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *Metadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetType() Metadata_MetricType {
-	if m != nil {
-		return m.Type
-	}
-	return METRIC_TYPE_UNSPECIFIED
-}
-
-func (m *Metadata) GetHelpRef() uint32 {
-	if m != nil {
-		return m.HelpRef
-	}
-	return 0
-}
-
-func (m *Metadata) GetUnitRef() uint32 {
-	if m != nil {
-		return m.UnitRef
-	}
-	return 0
-}
-
-// A native histogram, also known as a sparse histogram.
-// Original design doc:
-// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
-// The appendix of this design doc also explains the concept of float
-// histograms. This Histogram message can represent both, the usual
-// integer histogram as well as a float histogram.
-type Histogram struct {
-	// Types that are valid to be assigned to Count:
-	//
-	//	*Histogram_CountInt
-	//	*Histogram_CountFloat
-	Count isHistogram_Count `protobuf_oneof:"count"`
-	Sum   float64           `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
-	// The schema defines the bucket schema. Currently, valid numbers
-	// are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
-	// added in future for new bucketing layouts.
-	//
-	// The schema equal to -53 means custom buckets. See
-	// custom_values field description for more details.
-	//
-	// Values between -4 and 8 represent base-2 bucket schema, where 1
-	// is a bucket boundary in each case, and then each power of two is
-	// divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
-	// each bucket boundary is the previous boundary times 2^(2^-n).
-	Schema        int32   `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
-	ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
-	// Types that are valid to be assigned to ZeroCount:
-	//
-	//	*Histogram_ZeroCountInt
-	//	*Histogram_ZeroCountFloat
-	ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
-	// Negative Buckets.
-	NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
-	// Use either "negative_deltas" or "negative_counts", the former for
-	// regular histograms with integer counts, the latter for
-	// float histograms.
-	NegativeDeltas []int64   `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
-	NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
-	// Positive Buckets.
-	//
-	// In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
-	// * The span offset+length points to an the index of the custom_values array
-	// or +Inf if pointing to the len of the array.
-	// * The counts and deltas have the same meaning as for exponential histograms.
-	PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
-	// Use either "positive_deltas" or "positive_counts", the former for
-	// regular histograms with integer counts, the latter for
-	// float histograms.
-	PositiveDeltas []int64             `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
-	PositiveCounts []float64           `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
-	ResetHint      Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=cortexpbv2.Histogram_ResetHint" json:"reset_hint,omitempty"`
-	// timestamp represents timestamp of the sample in ms.
-	//
-	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-	// for conversion from/to time.Time to Prometheus timestamp.
-	Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
-	// custom_values is an additional field used by non-exponential bucketing layouts.
-	//
-	// For custom buckets (-53 schema value) custom_values specify monotonically
-	// increasing upper inclusive boundaries for the bucket counts with arbitrary
-	// widths for this histogram. In other words, custom_values represents custom,
-	// explicit bucketing that could have been converted from the classic histograms.
-	//
-	// Those bounds are then referenced by spans in positive_spans with corresponding positive
-	// counts of deltas (refer to positive_spans for more details). This way we can
-	// have encode sparse histograms with custom bucketing (many buckets are often
-	// not used).
-	//
-	// Note that for custom bounds, even negative observations are placed in the positive
-	// counts to simplify the implementation and avoid ambiguity of where to place
-	// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
-	// the zero bucket are unused, if the schema indicates custom bucketing.
-	//
-	// For each upper boundary the previous boundary represent the lower exclusive
-	// boundary for that bucket. The first element is the upper inclusive boundary
-	// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
-	// This is similar to "le" label semantics on classic histograms. You may add a
-	// bucket with an upper bound of 0 to make sure that you really have no negative
-	// observations, but in practice, native histogram rendering will show both with
-	// or without first upper boundary 0 and no negative counts as the same case.
-	//
-	// The last element is not only the upper inclusive bound of the last regular
-	// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
-	CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
-}
-
-func (m *Histogram) Reset()      { *m = Histogram{} }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{6}
-}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
-	return m.Size()
-}
-func (m *Histogram) XXX_DiscardUnknown() {
-	xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-type isHistogram_Count interface {
-	isHistogram_Count()
-	Equal(interface{}) bool
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-type isHistogram_ZeroCount interface {
-	isHistogram_ZeroCount()
-	Equal(interface{}) bool
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type Histogram_CountInt struct {
-	CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"`
-}
-type Histogram_CountFloat struct {
-	CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"`
-}
-type Histogram_ZeroCountInt struct {
-	ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"`
-}
-type Histogram_ZeroCountFloat struct {
-	ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"`
-}
-
-func (*Histogram_CountInt) isHistogram_Count()           {}
-func (*Histogram_CountFloat) isHistogram_Count()         {}
-func (*Histogram_ZeroCountInt) isHistogram_ZeroCount()   {}
-func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
-
-func (m *Histogram) GetCount() isHistogram_Count {
-	if m != nil {
-		return m.Count
-	}
-	return nil
-}
-func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
-	if m != nil {
-		return m.ZeroCount
-	}
-	return nil
-}
-
-func (m *Histogram) GetCountInt() uint64 {
-	if x, ok := m.GetCount().(*Histogram_CountInt); ok {
-		return x.CountInt
-	}
-	return 0
-}
-
-func (m *Histogram) GetCountFloat() float64 {
-	if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
-		return x.CountFloat
-	}
-	return 0
-}
-
-func (m *Histogram) GetSum() float64 {
-	if m != nil {
-		return m.Sum
-	}
-	return 0
-}
-
-func (m *Histogram) GetSchema() int32 {
-	if m != nil {
-		return m.Schema
-	}
-	return 0
-}
-
-func (m *Histogram) GetZeroThreshold() float64 {
-	if m != nil {
-		return m.ZeroThreshold
-	}
-	return 0
-}
-
-func (m *Histogram) GetZeroCountInt() uint64 {
-	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
-		return x.ZeroCountInt
-	}
-	return 0
-}
-
-func (m *Histogram) GetZeroCountFloat() float64 {
-	if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
-		return x.ZeroCountFloat
-	}
-	return 0
-}
-
-func (m *Histogram) GetNegativeSpans() []BucketSpan {
-	if m != nil {
-		return m.NegativeSpans
-	}
-	return nil
-}
-
-func (m *Histogram) GetNegativeDeltas() []int64 {
-	if m != nil {
-		return m.NegativeDeltas
-	}
-	return nil
-}
-
-func (m *Histogram) GetNegativeCounts() []float64 {
-	if m != nil {
-		return m.NegativeCounts
-	}
-	return nil
-}
-
-func (m *Histogram) GetPositiveSpans() []BucketSpan {
-	if m != nil {
-		return m.PositiveSpans
-	}
-	return nil
-}
-
-func (m *Histogram) GetPositiveDeltas() []int64 {
-	if m != nil {
-		return m.PositiveDeltas
-	}
-	return nil
-}
-
-func (m *Histogram) GetPositiveCounts() []float64 {
-	if m != nil {
-		return m.PositiveCounts
-	}
-	return nil
-}
-
-func (m *Histogram) GetResetHint() Histogram_ResetHint {
-	if m != nil {
-		return m.ResetHint
-	}
-	return RESET_HINT_UNSPECIFIED
-}
-
-func (m *Histogram) GetTimestamp() int64 {
-	if m != nil {
-		return m.Timestamp
-	}
-	return 0
-}
-
-func (m *Histogram) GetCustomValues() []float64 {
-	if m != nil {
-		return m.CustomValues
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Histogram) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*Histogram_CountInt)(nil),
-		(*Histogram_CountFloat)(nil),
-		(*Histogram_ZeroCountInt)(nil),
-		(*Histogram_ZeroCountFloat)(nil),
-	}
-}
-
-// A BucketSpan defines a number of consecutive buckets with their
-// offset. Logically, it would be more straightforward to include the
-// bucket counts in the Span. However, the protobuf representation is
-// more compact in the way the data is structured here (with all the
-// buckets in a single array separate from the Spans).
-type BucketSpan struct {
-	Offset int32  `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
-	Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
-}
-
-func (m *BucketSpan) Reset()      { *m = BucketSpan{} }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
-	return fileDescriptor_affad2b75b7d03df, []int{7}
-}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BucketSpan.Merge(m, src)
-}
-func (m *BucketSpan) XXX_Size() int {
-	return m.Size()
-}
-func (m *BucketSpan) XXX_DiscardUnknown() {
-	xxx_messageInfo_BucketSpan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
-
-func (m *BucketSpan) GetOffset() int32 {
-	if m != nil {
-		return m.Offset
-	}
-	return 0
-}
-
-func (m *BucketSpan) GetLength() uint32 {
-	if m != nil {
-		return m.Length
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterEnum("cortexpbv2.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
-	proto.RegisterEnum("cortexpbv2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value)
-	proto.RegisterEnum("cortexpbv2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
-	proto.RegisterType((*WriteRequest)(nil), "cortexpbv2.WriteRequest")
-	proto.RegisterType((*WriteResponse)(nil), "cortexpbv2.WriteResponse")
-	proto.RegisterType((*TimeSeries)(nil), "cortexpbv2.TimeSeries")
-	proto.RegisterType((*Exemplar)(nil), "cortexpbv2.Exemplar")
-	proto.RegisterType((*Sample)(nil), "cortexpbv2.Sample")
-	proto.RegisterType((*Metadata)(nil), "cortexpbv2.Metadata")
-	proto.RegisterType((*Histogram)(nil), "cortexpbv2.Histogram")
-	proto.RegisterType((*BucketSpan)(nil), "cortexpbv2.BucketSpan")
-}
-
-func init() { proto.RegisterFile("cortexv2.proto", fileDescriptor_affad2b75b7d03df) }
-
-var fileDescriptor_affad2b75b7d03df = []byte{
-	// 1094 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
-	0x18, 0xf6, 0x78, 0x1d, 0x7f, 0xbc, 0x89, 0xdd, 0xcd, 0xe0, 0xb6, 0xdb, 0x50, 0x36, 0xc6, 0x15,
-	0x60, 0x81, 0x14, 0x24, 0x57, 0x42, 0x48, 0xad, 0x10, 0x71, 0xba, 0x89, 0x8d, 0x1a, 0x27, 0x1a,
-	0x6f, 0x82, 0xc2, 0x65, 0xb5, 0xb1, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0x38, 0x6a, 0x38, 0xf1,
-	0x13, 0xf8, 0x09, 0x3d, 0xf2, 0x27, 0xb8, 0xf7, 0x98, 0x0b, 0x52, 0x85, 0x44, 0x45, 0x9c, 0x4b,
-	0x8f, 0xfd, 0x09, 0x68, 0x67, 0x3f, 0x93, 0x80, 0xe0, 0x36, 0xef, 0xf3, 0x3e, 0xcf, 0xec, 0x33,
-	0xcf, 0xcc, 0x6b, 0x19, 0x1a, 0x63, 0x2f, 0xe0, 0xf4, 0xe5, 0x59, 0x77, 0xcb, 0x0f, 0x3c, 0xee,
-	0x61, 0x88, 0x6a, 0xff, 0xf4, 0xac, 0xbb, 0xd1, 0x9c, 0x79, 0x33, 0x4f, 0xc0, 0x5f, 0x86, 0xab,
-	0x88, 0xd1, 0x7e, 0x55, 0x84, 0xb5, 0xef, 0x03, 0x8b, 0x53, 0x42, 0x7f, 0x5c, 0x50, 0xc6, 0xf1,
-	0x13, 0x28, 0x8f, 0xbc, 0x45, 0x30, 0xa6, 0x8a, 0xd4, 0x42, 0x9d, 0x46, 0xf7, 0xd1, 0x56, 0xb6,
-	0xc7, 0x56, 0x9e, 0xb9, 0x15, 0xd1, 0x34, 0x77, 0xe1, 0x90, 0x58, 0x82, 0x15, 0xa8, 0xb0, 0x73,
-	0xe7, 0xd4, 0xb3, 0x99, 0x52, 0x6a, 0x49, 0x9d, 0x1a, 0x49, 0x4a, 0xac, 0x03, 0x70, 0xcb, 0xa1,
-	0x8c, 0x06, 0x16, 0x65, 0xca, 0x4a, 0x4b, 0xea, 0xac, 0x76, 0xef, 0xe5, 0xb7, 0xd6, 0x2d, 0x87,
-	0x8e, 0x44, 0xb7, 0xf7, 0xf0, 0xf5, 0xdb, 0xcd, 0xc2, 0x1f, 0x6f, 0x37, 0x9b, 0x87, 0x01, 0x35,
-	0x6d, 0xdb, 0x1b, 0xeb, 0xa9, 0xf2, 0xb8, 0x4b, 0x72, 0xfb, 0xe0, 0xa7, 0xb0, 0xc1, 0x5e, 0x58,
-	0xbe, 0x61, 0x9b, 0xa7, 0xd4, 0x36, 0x5c, 0xd3, 0xa1, 0xc6, 0x99, 0x69, 0x5b, 0x13, 0x93, 0x5b,
-	0x9e, 0xab, 0xbc, 0xab, 0xb4, 0x50, 0xa7, 0x4a, 0xee, 0x87, 0x94, 0xe7, 0x21, 0x63, 0x68, 0x3a,
-	0xf4, 0x38, 0xed, 0xb7, 0x37, 0x01, 0xb2, 0x33, 0xe0, 0x0a, 0x48, 0xdb, 0x87, 0x03, 0xb9, 0x80,
-	0xab, 0x50, 0x22, 0x47, 0xcf, 0x35, 0x19, 0x7d, 0x57, 0xaa, 0x22, 0x59, 0x6a, 0xcf, 0xa0, 0x1e,
-	0x9f, 0x9b, 0xf9, 0x9e, 0xcb, 0xc4, 0x29, 0x47, 0xa6, 0xe3, 0xdb, 0x94, 0x29, 0xa8, 0x85, 0x3a,
-	0x12, 0x49, 0x4a, 0xac, 0x02, 0xf4, 0x2d, 0xc6, 0xbd, 0x59, 0x60, 0x3a, 0x4c, 0x29, 0x8a, 0x66,
-	0x0e, 0xc1, 0x0f, 0xa1, 0xa6, 0xbd, 0xa4, 0x8e, 0x6f, 0x9b, 0x01, 0x13, 0xf9, 0x4a, 0x24, 0x03,
-	0xda, 0xbf, 0x15, 0x01, 0xb2, 0x18, 0xf0, 0x26, 0xac, 0x8a, 0x73, 0x31, 0x23, 0xa0, 0xd3, 0xf0,
-	0x53, 0x52, 0xa7, 0x4e, 0x20, 0x82, 0x08, 0x9d, 0x32, 0xdc, 0x85, 0x0a, 0x8b, 0x7d, 0x14, 0x45,
-	0xa0, 0x38, 0x1f, 0x68, 0xe4, 0xa9, 0x57, 0x0a, 0xc3, 0x24, 0x09, 0x11, 0x3f, 0x01, 0x98, 0x67,
-	0x0e, 0x25, 0x21, 0xbb, 0x9b, 0x97, 0xa5, 0x6e, 0x63, 0x65, 0x8e, 0x8e, 0xbf, 0x86, 0x1a, 0x4d,
-	0xed, 0x97, 0x84, 0xb6, 0x99, 0xd7, 0x26, 0x47, 0x89, 0xa5, 0x19, 0x19, 0x7f, 0x05, 0x55, 0x87,
-	0x72, 0x73, 0x62, 0x72, 0x53, 0x59, 0x69, 0xa1, 0x9b, 0xc2, 0xfd, 0xb8, 0x17, 0x0b, 0x53, 0x2e,
-	0xfe, 0x02, 0xd6, 0xc7, 0x01, 0x35, 0x39, 0x9d, 0x18, 0xe2, 0xda, 0xb9, 0xe9, 0xf8, 0x4a, 0x59,
-	0x04, 0x27, 0xc7, 0x0d, 0x3d, 0xc1, 0xdb, 0x06, 0x54, 0x13, 0x07, 0xff, 0x1d, 0x5e, 0x13, 0x56,
-	0xce, 0x4c, 0x7b, 0x41, 0xc5, 0x2d, 0x21, 0x12, 0x15, 0xe1, 0x05, 0x65, 0xdf, 0x89, 0x2f, 0x28,
-	0x05, 0xda, 0x4f, 0xa1, 0x1c, 0xa5, 0x9a, 0xa9, 0xd1, 0xbf, 0xaa, 0x8b, 0x37, 0xd5, 0xbf, 0x17,
-	0xa1, 0x9a, 0x1c, 0x14, 0x3f, 0x86, 0x12, 0x3f, 0xf7, 0x23, 0x7d, 0xa3, 0xbb, 0xf9, 0x4f, 0x61,
-	0x84, 0x8b, 0xc0, 0x1a, 0xeb, 0xe7, 0x3e, 0x25, 0x82, 0x8c, 0x1f, 0x40, 0x75, 0x4e, 0x6d, 0x3f,
-	0x3c, 0x92, 0x30, 0x57, 0x27, 0x95, 0xb0, 0x26, 0x74, 0x1a, 0xb6, 0x16, 0xae, 0xc5, 0x45, 0xab,
-	0x14, 0xb5, 0xc2, 0x9a, 0xd0, 0x69, 0xfb, 0x4f, 0x04, 0x90, 0x6d, 0x85, 0x3f, 0x84, 0xfb, 0xfb,
-	0x9a, 0x4e, 0x06, 0x3b, 0x86, 0x7e, 0x72, 0xa8, 0x19, 0x47, 0xc3, 0xd1, 0xa1, 0xb6, 0x33, 0xd8,
-	0x1d, 0x68, 0xcf, 0xe4, 0x02, 0xbe, 0x0f, 0x1f, 0xe4, 0x9b, 0x3b, 0x07, 0x47, 0x43, 0x5d, 0x23,
-	0x32, 0xc2, 0x77, 0x61, 0x3d, 0xdf, 0xd8, 0xdb, 0x3e, 0xda, 0xd3, 0xe4, 0x22, 0x7e, 0x00, 0x77,
-	0xf3, 0x70, 0x7f, 0x30, 0xd2, 0x0f, 0xf6, 0xc8, 0xf6, 0xbe, 0x2c, 0x61, 0x15, 0x36, 0x6e, 0x29,
-	0xb2, 0x7e, 0xe9, 0xe6, 0xa7, 0x46, 0x47, 0xfb, 0xfb, 0xdb, 0xe4, 0x44, 0x5e, 0xc1, 0x4d, 0x90,
-	0xf3, 0x8d, 0xc1, 0x70, 0xf7, 0x40, 0x2e, 0x63, 0x05, 0x9a, 0xd7, 0xe8, 0xfa, 0xb6, 0xae, 0x8d,
-	0x34, 0x5d, 0xae, 0xb4, 0x5f, 0x95, 0xa1, 0x96, 0xbe, 0x5a, 0xfc, 0x11, 0xd4, 0xc6, 0xde, 0xc2,
-	0xe5, 0x86, 0xe5, 0x72, 0x91, 0x6e, 0xa9, 0x5f, 0x20, 0x55, 0x01, 0x0d, 0x5c, 0x8e, 0x3f, 0x86,
-	0xd5, 0xa8, 0x3d, 0xb5, 0x3d, 0x93, 0x47, 0x97, 0xdf, 0x2f, 0x10, 0x10, 0xe0, 0x6e, 0x88, 0x61,
-	0x19, 0x24, 0xb6, 0x70, 0x44, 0xc0, 0x88, 0x84, 0x4b, 0x7c, 0x0f, 0xca, 0x6c, 0x3c, 0xa7, 0x8e,
-	0x29, 0xa2, 0x5d, 0x27, 0x71, 0x85, 0x3f, 0x81, 0xc6, 0x4f, 0x34, 0xf0, 0x0c, 0x3e, 0x0f, 0x28,
-	0x9b, 0x7b, 0xf6, 0x44, 0xbc, 0x6d, 0x44, 0xea, 0x21, 0xaa, 0x27, 0x20, 0xfe, 0x34, 0xa6, 0x65,
-	0xbe, 0xca, 0xc2, 0x17, 0x22, 0x6b, 0x21, 0xbe, 0x93, 0x78, 0xfb, 0x1c, 0xe4, 0x1c, 0x2f, 0x32,
-	0x58, 0x11, 0x06, 0x11, 0x69, 0xa4, 0xcc, 0xc8, 0xe4, 0x0e, 0x34, 0x5c, 0x3a, 0x33, 0xb9, 0x75,
-	0x46, 0x0d, 0xe6, 0x9b, 0x2e, 0x53, 0xaa, 0xb7, 0x7f, 0x53, 0x7b, 0x8b, 0xf1, 0x0b, 0xca, 0x47,
-	0xbe, 0xe9, 0xc6, 0x83, 0x55, 0x4f, 0x34, 0x21, 0xc6, 0xf0, 0x67, 0x70, 0x27, 0xdd, 0x64, 0x42,
-	0x6d, 0x6e, 0x32, 0xa5, 0xd6, 0x92, 0x3a, 0x98, 0xa4, 0x7b, 0x3f, 0x13, 0xe8, 0x35, 0xa2, 0x70,
-	0xc7, 0x14, 0x68, 0x49, 0x1d, 0x94, 0x11, 0x85, 0x35, 0x16, 0xda, 0xf2, 0x3d, 0x66, 0xe5, 0x6c,
-	0xad, 0xfe, 0x1f, 0x5b, 0x89, 0x26, 0xb5, 0x95, 0x6e, 0x12, 0xdb, 0x5a, 0x8b, 0x6c, 0x25, 0x70,
-	0x66, 0x2b, 0x25, 0xc6, 0xb6, 0xea, 0x91, 0xad, 0x04, 0x8e, 0x6d, 0x7d, 0x03, 0x10, 0x50, 0x46,
-	0xb9, 0x31, 0x0f, 0xd3, 0x6f, 0xdc, 0x9e, 0xb9, 0xf4, 0xfd, 0x6c, 0x91, 0x90, 0xd7, 0xb7, 0x5c,
-	0x4e, 0x6a, 0x41, 0xb2, 0xbc, 0x3e, 0xd8, 0x77, 0x6e, 0x0c, 0x36, 0x7e, 0x04, 0xf5, 0xf1, 0x82,
-	0x71, 0xcf, 0x31, 0xc4, 0xcf, 0x00, 0x53, 0x64, 0x61, 0x62, 0x2d, 0x02, 0x8f, 0x05, 0xd6, 0x9e,
-	0x40, 0x2d, 0xdd, 0x1a, 0x6f, 0xc0, 0x3d, 0x12, 0xbe, 0x5e, 0xa3, 0x3f, 0x18, 0xea, 0x37, 0x46,
-	0x10, 0x43, 0x23, 0xd7, 0x3b, 0xd1, 0x46, 0x32, 0xc2, 0xeb, 0x50, 0xcf, 0x61, 0xc3, 0x03, 0xb9,
-	0x18, 0x4e, 0x49, 0x0e, 0x8a, 0xe6, 0x51, 0xea, 0x55, 0x60, 0x45, 0x04, 0xd1, 0x5b, 0x03, 0xc8,
-	0xde, 0x52, 0xfb, 0x29, 0x40, 0x16, 0x7a, 0xf8, 0x9c, 0xbd, 0xe9, 0x94, 0xd1, 0x68, 0x3e, 0xd6,
-	0x49, 0x5c, 0x85, 0xb8, 0x4d, 0xdd, 0x19, 0x9f, 0x8b, 0xb1, 0xa8, 0x93, 0xb8, 0xea, 0x7d, 0x7b,
-	0x71, 0xa9, 0x16, 0xde, 0x5c, 0xaa, 0x85, 0xf7, 0x97, 0x2a, 0xfa, 0x79, 0xa9, 0xa2, 0x5f, 0x97,
-	0x2a, 0x7a, 0xbd, 0x54, 0xd1, 0xc5, 0x52, 0x45, 0x7f, 0x2d, 0x55, 0xf4, 0x6e, 0xa9, 0x16, 0xde,
-	0x2f, 0x55, 0xf4, 0xcb, 0x95, 0x5a, 0xb8, 0xb8, 0x52, 0x0b, 0x6f, 0xae, 0xd4, 0xc2, 0x0f, 0xb9,
-	0xff, 0x1e, 0xa7, 0x65, 0xf1, 0x67, 0xe3, 0xf1, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x03, 0x9f,
-	0x7c, 0x56, 0xa0, 0x08, 0x00, 0x00,
-}
-
-func (x WriteRequest_SourceEnum) String() string {
-	s, ok := WriteRequest_SourceEnum_name[int32(x)]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(x))
-}
-func (x Metadata_MetricType) String() string {
-	s, ok := Metadata_MetricType_name[int32(x)]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(x))
-}
-func (x Histogram_ResetHint) String() string {
-	s, ok := Histogram_ResetHint_name[int32(x)]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(x))
-}
-func (this *WriteRequest) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*WriteRequest)
-	if !ok {
-		that2, ok := that.(WriteRequest)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.Source != that1.Source {
-		return false
-	}
-	if len(this.Symbols) != len(that1.Symbols) {
-		return false
-	}
-	for i := range this.Symbols {
-		if this.Symbols[i] != that1.Symbols[i] {
-			return false
-		}
-	}
-	if len(this.Timeseries) != len(that1.Timeseries) {
-		return false
-	}
-	for i := range this.Timeseries {
-		if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
-			return false
-		}
-	}
-	if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
-		return false
-	}
-	return true
-}
-func (this *WriteResponse) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*WriteResponse)
-	if !ok {
-		that2, ok := that.(WriteResponse)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.Samples != that1.Samples {
-		return false
-	}
-	if this.Histograms != that1.Histograms {
-		return false
-	}
-	if this.Exemplars != that1.Exemplars {
-		return false
-	}
-	return true
-}
-func (this *TimeSeries) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*TimeSeries)
-	if !ok {
-		that2, ok := that.(TimeSeries)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
-		return false
-	}
-	for i := range this.LabelsRefs {
-		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
-			return false
-		}
-	}
-	if len(this.Samples) != len(that1.Samples) {
-		return false
-	}
-	for i := range this.Samples {
-		if !this.Samples[i].Equal(&that1.Samples[i]) {
-			return false
-		}
-	}
-	if len(this.Histograms) != len(that1.Histograms) {
-		return false
-	}
-	for i := range this.Histograms {
-		if !this.Histograms[i].Equal(&that1.Histograms[i]) {
-			return false
-		}
-	}
-	if len(this.Exemplars) != len(that1.Exemplars) {
-		return false
-	}
-	for i := range this.Exemplars {
-		if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
-			return false
-		}
-	}
-	if !this.Metadata.Equal(&that1.Metadata) {
-		return false
-	}
-	if this.CreatedTimestamp != that1.CreatedTimestamp {
-		return false
-	}
-	return true
-}
-func (this *Exemplar) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Exemplar)
-	if !ok {
-		that2, ok := that.(Exemplar)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if len(this.LabelsRefs) != len(that1.LabelsRefs) {
-		return false
-	}
-	for i := range this.LabelsRefs {
-		if this.LabelsRefs[i] != that1.LabelsRefs[i] {
-			return false
-		}
-	}
-	if this.Value != that1.Value {
-		return false
-	}
-	if this.Timestamp != that1.Timestamp {
-		return false
-	}
-	return true
-}
-func (this *Sample) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Sample)
-	if !ok {
-		that2, ok := that.(Sample)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.Value != that1.Value {
-		return false
-	}
-	if this.Timestamp != that1.Timestamp {
-		return false
-	}
-	return true
-}
-func (this *Metadata) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Metadata)
-	if !ok {
-		that2, ok := that.(Metadata)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.Type != that1.Type {
-		return false
-	}
-	if this.HelpRef != that1.HelpRef {
-		return false
-	}
-	if this.UnitRef != that1.UnitRef {
-		return false
-	}
-	return true
-}
-func (this *Histogram) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Histogram)
-	if !ok {
-		that2, ok := that.(Histogram)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if that1.Count == nil {
-		if this.Count != nil {
-			return false
-		}
-	} else if this.Count == nil {
-		return false
-	} else if !this.Count.Equal(that1.Count) {
-		return false
-	}
-	if this.Sum != that1.Sum {
-		return false
-	}
-	if this.Schema != that1.Schema {
-		return false
-	}
-	if this.ZeroThreshold != that1.ZeroThreshold {
-		return false
-	}
-	if that1.ZeroCount == nil {
-		if this.ZeroCount != nil {
-			return false
-		}
-	} else if this.ZeroCount == nil {
-		return false
-	} else if !this.ZeroCount.Equal(that1.ZeroCount) {
-		return false
-	}
-	if len(this.NegativeSpans) != len(that1.NegativeSpans) {
-		return false
-	}
-	for i := range this.NegativeSpans {
-		if !this.NegativeSpans[i].Equal(&that1.NegativeSpans[i]) {
-			return false
-		}
-	}
-	if len(this.NegativeDeltas) != len(that1.NegativeDeltas) {
-		return false
-	}
-	for i := range this.NegativeDeltas {
-		if this.NegativeDeltas[i] != that1.NegativeDeltas[i] {
-			return false
-		}
-	}
-	if len(this.NegativeCounts) != len(that1.NegativeCounts) {
-		return false
-	}
-	for i := range this.NegativeCounts {
-		if this.NegativeCounts[i] != that1.NegativeCounts[i] {
-			return false
-		}
-	}
-	if len(this.PositiveSpans) != len(that1.PositiveSpans) {
-		return false
-	}
-	for i := range this.PositiveSpans {
-		if !this.PositiveSpans[i].Equal(&that1.PositiveSpans[i]) {
-			return false
-		}
-	}
-	if len(this.PositiveDeltas) != len(that1.PositiveDeltas) {
-		return false
-	}
-	for i := range this.PositiveDeltas {
-		if this.PositiveDeltas[i] != that1.PositiveDeltas[i] {
-			return false
-		}
-	}
-	if len(this.PositiveCounts) != len(that1.PositiveCounts) {
-		return false
-	}
-	for i := range this.PositiveCounts {
-		if this.PositiveCounts[i] != that1.PositiveCounts[i] {
-			return false
-		}
-	}
-	if this.ResetHint != that1.ResetHint {
-		return false
-	}
-	if this.Timestamp != that1.Timestamp {
-		return false
-	}
-	if len(this.CustomValues) != len(that1.CustomValues) {
-		return false
-	}
-	for i := range this.CustomValues {
-		if this.CustomValues[i] != that1.CustomValues[i] {
-			return false
-		}
-	}
-	return true
-}
-func (this *Histogram_CountInt) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Histogram_CountInt)
-	if !ok {
-		that2, ok := that.(Histogram_CountInt)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.CountInt != that1.CountInt {
-		return false
-	}
-	return true
-}
-func (this *Histogram_CountFloat) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Histogram_CountFloat)
-	if !ok {
-		that2, ok := that.(Histogram_CountFloat)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.CountFloat != that1.CountFloat {
-		return false
-	}
-	return true
-}
-func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Histogram_ZeroCountInt)
-	if !ok {
-		that2, ok := that.(Histogram_ZeroCountInt)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.ZeroCountInt != that1.ZeroCountInt {
-		return false
-	}
-	return true
-}
-func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*Histogram_ZeroCountFloat)
-	if !ok {
-		that2, ok := that.(Histogram_ZeroCountFloat)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.ZeroCountFloat != that1.ZeroCountFloat {
-		return false
-	}
-	return true
-}
-func (this *BucketSpan) Equal(that interface{}) bool {
-	if that == nil {
-		return this == nil
-	}
-
-	that1, ok := that.(*BucketSpan)
-	if !ok {
-		that2, ok := that.(BucketSpan)
-		if ok {
-			that1 = &that2
-		} else {
-			return false
-		}
-	}
-	if that1 == nil {
-		return this == nil
-	} else if this == nil {
-		return false
-	}
-	if this.Offset != that1.Offset {
-		return false
-	}
-	if this.Length != that1.Length {
-		return false
-	}
-	return true
-}
-func (this *WriteRequest) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 8)
-	s = append(s, "&cortexpbv2.WriteRequest{")
-	s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
-	s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n")
-	s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
-	s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *WriteResponse) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&cortexpbv2.WriteResponse{")
-	s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n")
-	s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n")
-	s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *TimeSeries) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 10)
-	s = append(s, "&cortexpbv2.TimeSeries{")
-	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
-	if this.Samples != nil {
-		vs := make([]*Sample, len(this.Samples))
-		for i := range vs {
-			vs[i] = &this.Samples[i]
-		}
-		s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
-	if this.Histograms != nil {
-		vs := make([]*Histogram, len(this.Histograms))
-		for i := range vs {
-			vs[i] = &this.Histograms[i]
-		}
-		s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
-	if this.Exemplars != nil {
-		vs := make([]*Exemplar, len(this.Exemplars))
-		for i := range vs {
-			vs[i] = &this.Exemplars[i]
-		}
-		s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
-	s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n")
-	s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Exemplar) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&cortexpbv2.Exemplar{")
-	s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
-	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
-	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Sample) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&cortexpbv2.Sample{")
-	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
-	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Metadata) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&cortexpbv2.Metadata{")
-	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
-	s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n")
-	s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Histogram) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 20)
-	s = append(s, "&cortexpbv2.Histogram{")
-	if this.Count != nil {
-		s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n")
-	}
-	s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n")
-	s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n")
-	s = append(s, "ZeroThreshold: "+fmt.Sprintf("%#v", this.ZeroThreshold)+",\n")
-	if this.ZeroCount != nil {
-		s = append(s, "ZeroCount: "+fmt.Sprintf("%#v", this.ZeroCount)+",\n")
-	}
-	if this.NegativeSpans != nil {
-		vs := make([]*BucketSpan, len(this.NegativeSpans))
-		for i := range vs {
-			vs[i] = &this.NegativeSpans[i]
-		}
-		s = append(s, "NegativeSpans: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
-	s = append(s, "NegativeDeltas: "+fmt.Sprintf("%#v", this.NegativeDeltas)+",\n")
-	s = append(s, "NegativeCounts: "+fmt.Sprintf("%#v", this.NegativeCounts)+",\n")
-	if this.PositiveSpans != nil {
-		vs := make([]*BucketSpan, len(this.PositiveSpans))
-		for i := range vs {
-			vs[i] = &this.PositiveSpans[i]
-		}
-		s = append(s, "PositiveSpans: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
-	s = append(s, "PositiveDeltas: "+fmt.Sprintf("%#v", this.PositiveDeltas)+",\n")
-	s = append(s, "PositiveCounts: "+fmt.Sprintf("%#v", this.PositiveCounts)+",\n")
-	s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n")
-	s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
-	s = append(s, "CustomValues: "+fmt.Sprintf("%#v", this.CustomValues)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *Histogram_CountInt) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&cortexpbv2.Histogram_CountInt{` +
-		`CountInt:` + fmt.Sprintf("%#v", this.CountInt) + `}`}, ", ")
-	return s
-}
-func (this *Histogram_CountFloat) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&cortexpbv2.Histogram_CountFloat{` +
-		`CountFloat:` + fmt.Sprintf("%#v", this.CountFloat) + `}`}, ", ")
-	return s
-}
-func (this *Histogram_ZeroCountInt) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountInt{` +
-		`ZeroCountInt:` + fmt.Sprintf("%#v", this.ZeroCountInt) + `}`}, ", ")
-	return s
-}
-func (this *Histogram_ZeroCountFloat) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountFloat{` +
-		`ZeroCountFloat:` + fmt.Sprintf("%#v", this.ZeroCountFloat) + `}`}, ", ")
-	return s
-}
-func (this *BucketSpan) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&cortexpbv2.BucketSpan{")
-	s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n")
-	s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringCortexv2(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.SkipLabelNameValidation {
-		i--
-		if m.SkipLabelNameValidation {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x3e
-		i--
-		dAtA[i] = 0xc0
-	}
-	if len(m.Timeseries) > 0 {
-		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size := m.Timeseries[iNdEx].Size()
-				i -= size
-				if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
-					return 0, err
-				}
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x2a
-		}
-	}
-	if len(m.Symbols) > 0 {
-		for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Symbols[iNdEx])
-			copy(dAtA[i:], m.Symbols[iNdEx])
-			i = encodeVarintCortexv2(dAtA, i, uint64(len(m.Symbols[iNdEx])))
-			i--
-			dAtA[i] = 0x22
-		}
-	}
-	if m.Source != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Source))
-		i--
-		dAtA[i] = 0x18
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WriteResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Exemplars != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Exemplars))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.Histograms != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Histograms))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Samples != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Samples))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.CreatedTimestamp != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.CreatedTimestamp))
-		i--
-		dAtA[i] = 0x30
-	}
-	{
-		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintCortexv2(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x2a
-	if len(m.Exemplars) > 0 {
-		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x22
-		}
-	}
-	if len(m.Histograms) > 0 {
-		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Samples) > 0 {
-		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if len(m.LabelsRefs) > 0 {
-		dAtA3 := make([]byte, len(m.LabelsRefs)*10)
-		var j2 int
-		for _, num := range m.LabelsRefs {
-			for num >= 1<<7 {
-				dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80)
-				num >>= 7
-				j2++
-			}
-			dAtA3[j2] = uint8(num)
-			j2++
-		}
-		i -= j2
-		copy(dAtA[i:], dAtA3[:j2])
-		i = encodeVarintCortexv2(dAtA, i, uint64(j2))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Exemplar) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Timestamp != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.Value != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
-		i--
-		dAtA[i] = 0x11
-	}
-	if len(m.LabelsRefs) > 0 {
-		dAtA5 := make([]byte, len(m.LabelsRefs)*10)
-		var j4 int
-		for _, num := range m.LabelsRefs {
-			for num >= 1<<7 {
-				dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
-				num >>= 7
-				j4++
-			}
-			dAtA5[j4] = uint8(num)
-			j4++
-		}
-		i -= j4
-		copy(dAtA[i:], dAtA5[:j4])
-		i = encodeVarintCortexv2(dAtA, i, uint64(j4))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Sample) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Timestamp != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Value != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
-		i--
-		dAtA[i] = 0x9
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Metadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.UnitRef != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.UnitRef))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.HelpRef != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.HelpRef))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.Type != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Type))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Histogram) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.CustomValues) > 0 {
-		for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- {
-			f6 := math.Float64bits(float64(m.CustomValues[iNdEx]))
-			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
-		}
-		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.CustomValues)*8))
-		i--
-		dAtA[i] = 0x1
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.Timestamp != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp))
-		i--
-		dAtA[i] = 0x78
-	}
-	if m.ResetHint != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.ResetHint))
-		i--
-		dAtA[i] = 0x70
-	}
-	if len(m.PositiveCounts) > 0 {
-		for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
-			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7))
-		}
-		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.PositiveCounts)*8))
-		i--
-		dAtA[i] = 0x6a
-	}
-	if len(m.PositiveDeltas) > 0 {
-		var j8 int
-		dAtA10 := make([]byte, len(m.PositiveDeltas)*10)
-		for _, num := range m.PositiveDeltas {
-			x9 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x9 >= 1<<7 {
-				dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80)
-				j8++
-				x9 >>= 7
-			}
-			dAtA10[j8] = uint8(x9)
-			j8++
-		}
-		i -= j8
-		copy(dAtA[i:], dAtA10[:j8])
-		i = encodeVarintCortexv2(dAtA, i, uint64(j8))
-		i--
-		dAtA[i] = 0x62
-	}
-	if len(m.PositiveSpans) > 0 {
-		for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x5a
-		}
-	}
-	if len(m.NegativeCounts) > 0 {
-		for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
-			f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
-			i -= 8
-			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11))
-		}
-		i = encodeVarintCortexv2(dAtA, i, uint64(len(m.NegativeCounts)*8))
-		i--
-		dAtA[i] = 0x52
-	}
-	if len(m.NegativeDeltas) > 0 {
-		var j12 int
-		dAtA14 := make([]byte, len(m.NegativeDeltas)*10)
-		for _, num := range m.NegativeDeltas {
-			x13 := (uint64(num) << 1) ^ uint64((num >> 63))
-			for x13 >= 1<<7 {
-				dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80)
-				j12++
-				x13 >>= 7
-			}
-			dAtA14[j12] = uint8(x13)
-			j12++
-		}
-		i -= j12
-		copy(dAtA[i:], dAtA14[:j12])
-		i = encodeVarintCortexv2(dAtA, i, uint64(j12))
-		i--
-		dAtA[i] = 0x4a
-	}
-	if len(m.NegativeSpans) > 0 {
-		for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintCortexv2(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x42
-		}
-	}
-	if m.ZeroCount != nil {
-		{
-			size := m.ZeroCount.Size()
-			i -= size
-			if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	if m.ZeroThreshold != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
-		i--
-		dAtA[i] = 0x29
-	}
-	if m.Schema != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Sum != 0 {
-		i -= 8
-		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
-		i--
-		dAtA[i] = 0x19
-	}
-	if m.Count != nil {
-		{
-			size := m.Count.Size()
-			i -= size
-			if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintCortexv2(dAtA, i, uint64(m.CountInt))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i -= 8
-	encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat))))
-	i--
-	dAtA[i] = 0x11
-	return len(dAtA) - i, nil
-}
-func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintCortexv2(dAtA, i, uint64(m.ZeroCountInt))
-	i--
-	dAtA[i] = 0x30
-	return len(dAtA) - i, nil
-}
-func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i -= 8
-	encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
-	i--
-	dAtA[i] = 0x39
-	return len(dAtA) - i, nil
-}
-func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Length != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64(m.Length))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Offset != 0 {
-		i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintCortexv2(dAtA []byte, offset int, v uint64) int {
-	offset -= sovCortexv2(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *WriteRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Source != 0 {
-		n += 1 + sovCortexv2(uint64(m.Source))
-	}
-	if len(m.Symbols) > 0 {
-		for _, s := range m.Symbols {
-			l = len(s)
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if len(m.Timeseries) > 0 {
-		for _, e := range m.Timeseries {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if m.SkipLabelNameValidation {
-		n += 3
-	}
-	return n
-}
-
-func (m *WriteResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Samples != 0 {
-		n += 1 + sovCortexv2(uint64(m.Samples))
-	}
-	if m.Histograms != 0 {
-		n += 1 + sovCortexv2(uint64(m.Histograms))
-	}
-	if m.Exemplars != 0 {
-		n += 1 + sovCortexv2(uint64(m.Exemplars))
-	}
-	return n
-}
-
-func (m *TimeSeries) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.LabelsRefs) > 0 {
-		l = 0
-		for _, e := range m.LabelsRefs {
-			l += sovCortexv2(uint64(e))
-		}
-		n += 1 + sovCortexv2(uint64(l)) + l
-	}
-	if len(m.Samples) > 0 {
-		for _, e := range m.Samples {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if len(m.Histograms) > 0 {
-		for _, e := range m.Histograms {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if len(m.Exemplars) > 0 {
-		for _, e := range m.Exemplars {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	l = m.Metadata.Size()
-	n += 1 + l + sovCortexv2(uint64(l))
-	if m.CreatedTimestamp != 0 {
-		n += 1 + sovCortexv2(uint64(m.CreatedTimestamp))
-	}
-	return n
-}
-
-func (m *Exemplar) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.LabelsRefs) > 0 {
-		l = 0
-		for _, e := range m.LabelsRefs {
-			l += sovCortexv2(uint64(e))
-		}
-		n += 1 + sovCortexv2(uint64(l)) + l
-	}
-	if m.Value != 0 {
-		n += 9
-	}
-	if m.Timestamp != 0 {
-		n += 1 + sovCortexv2(uint64(m.Timestamp))
-	}
-	return n
-}
-
-func (m *Sample) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Value != 0 {
-		n += 9
-	}
-	if m.Timestamp != 0 {
-		n += 1 + sovCortexv2(uint64(m.Timestamp))
-	}
-	return n
-}
-
-func (m *Metadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Type != 0 {
-		n += 1 + sovCortexv2(uint64(m.Type))
-	}
-	if m.HelpRef != 0 {
-		n += 1 + sovCortexv2(uint64(m.HelpRef))
-	}
-	if m.UnitRef != 0 {
-		n += 1 + sovCortexv2(uint64(m.UnitRef))
-	}
-	return n
-}
-
-func (m *Histogram) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Count != nil {
-		n += m.Count.Size()
-	}
-	if m.Sum != 0 {
-		n += 9
-	}
-	if m.Schema != 0 {
-		n += 1 + sozCortexv2(uint64(m.Schema))
-	}
-	if m.ZeroThreshold != 0 {
-		n += 9
-	}
-	if m.ZeroCount != nil {
-		n += m.ZeroCount.Size()
-	}
-	if len(m.NegativeSpans) > 0 {
-		for _, e := range m.NegativeSpans {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if len(m.NegativeDeltas) > 0 {
-		l = 0
-		for _, e := range m.NegativeDeltas {
-			l += sozCortexv2(uint64(e))
-		}
-		n += 1 + sovCortexv2(uint64(l)) + l
-	}
-	if len(m.NegativeCounts) > 0 {
-		n += 1 + sovCortexv2(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8
-	}
-	if len(m.PositiveSpans) > 0 {
-		for _, e := range m.PositiveSpans {
-			l = e.Size()
-			n += 1 + l + sovCortexv2(uint64(l))
-		}
-	}
-	if len(m.PositiveDeltas) > 0 {
-		l = 0
-		for _, e := range m.PositiveDeltas {
-			l += sozCortexv2(uint64(e))
-		}
-		n += 1 + sovCortexv2(uint64(l)) + l
-	}
-	if len(m.PositiveCounts) > 0 {
-		n += 1 + sovCortexv2(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8
-	}
-	if m.ResetHint != 0 {
-		n += 1 + sovCortexv2(uint64(m.ResetHint))
-	}
-	if m.Timestamp != 0 {
-		n += 1 + sovCortexv2(uint64(m.Timestamp))
-	}
-	if len(m.CustomValues) > 0 {
-		n += 2 + sovCortexv2(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8
-	}
-	return n
-}
-
-func (m *Histogram_CountInt) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovCortexv2(uint64(m.CountInt))
-	return n
-}
-func (m *Histogram_CountFloat) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 9
-	return n
-}
-func (m *Histogram_ZeroCountInt) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovCortexv2(uint64(m.ZeroCountInt))
-	return n
-}
-func (m *Histogram_ZeroCountFloat) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 9
-	return n
-}
-func (m *BucketSpan) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Offset != 0 {
-		n += 1 + sozCortexv2(uint64(m.Offset))
-	}
-	if m.Length != 0 {
-		n += 1 + sovCortexv2(uint64(m.Length))
-	}
-	return n
-}
-
-func sovCortexv2(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozCortexv2(x uint64) (n int) {
-	return sovCortexv2(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *WriteRequest) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&WriteRequest{`,
-		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
-		`Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`,
-		`Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
-		`SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *WriteResponse) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&WriteResponse{`,
-		`Samples:` + fmt.Sprintf("%v", this.Samples) + `,`,
-		`Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`,
-		`Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *TimeSeries) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForSamples := "[]Sample{"
-	for _, f := range this.Samples {
-		repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForSamples += "}"
-	repeatedStringForHistograms := "[]Histogram{"
-	for _, f := range this.Histograms {
-		repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForHistograms += "}"
-	repeatedStringForExemplars := "[]Exemplar{"
-	for _, f := range this.Exemplars {
-		repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "Exemplar", "Exemplar", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForExemplars += "}"
-	s := strings.Join([]string{`&TimeSeries{`,
-		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
-		`Samples:` + repeatedStringForSamples + `,`,
-		`Histograms:` + repeatedStringForHistograms + `,`,
-		`Exemplars:` + repeatedStringForExemplars + `,`,
-		`Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`,
-		`CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Exemplar) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Exemplar{`,
-		`LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
-		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
-		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Sample) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Sample{`,
-		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
-		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Metadata) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Metadata{`,
-		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
-		`HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`,
-		`UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Histogram) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForNegativeSpans := "[]BucketSpan{"
-	for _, f := range this.NegativeSpans {
-		repeatedStringForNegativeSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForNegativeSpans += "}"
-	repeatedStringForPositiveSpans := "[]BucketSpan{"
-	for _, f := range this.PositiveSpans {
-		repeatedStringForPositiveSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForPositiveSpans += "}"
-	s := strings.Join([]string{`&Histogram{`,
-		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
-		`Sum:` + fmt.Sprintf("%v", this.Sum) + `,`,
-		`Schema:` + fmt.Sprintf("%v", this.Schema) + `,`,
-		`ZeroThreshold:` + fmt.Sprintf("%v", this.ZeroThreshold) + `,`,
-		`ZeroCount:` + fmt.Sprintf("%v", this.ZeroCount) + `,`,
-		`NegativeSpans:` + repeatedStringForNegativeSpans + `,`,
-		`NegativeDeltas:` + fmt.Sprintf("%v", this.NegativeDeltas) + `,`,
-		`NegativeCounts:` + fmt.Sprintf("%v", this.NegativeCounts) + `,`,
-		`PositiveSpans:` + repeatedStringForPositiveSpans + `,`,
-		`PositiveDeltas:` + fmt.Sprintf("%v", this.PositiveDeltas) + `,`,
-		`PositiveCounts:` + fmt.Sprintf("%v", this.PositiveCounts) + `,`,
-		`ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`,
-		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
-		`CustomValues:` + fmt.Sprintf("%v", this.CustomValues) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Histogram_CountInt) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Histogram_CountInt{`,
-		`CountInt:` + fmt.Sprintf("%v", this.CountInt) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Histogram_CountFloat) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Histogram_CountFloat{`,
-		`CountFloat:` + fmt.Sprintf("%v", this.CountFloat) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Histogram_ZeroCountInt) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Histogram_ZeroCountInt{`,
-		`ZeroCountInt:` + fmt.Sprintf("%v", this.ZeroCountInt) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *Histogram_ZeroCountFloat) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&Histogram_ZeroCountFloat{`,
-		`ZeroCountFloat:` + fmt.Sprintf("%v", this.ZeroCountFloat) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *BucketSpan) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&BucketSpan{`,
-		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
-		`Length:` + fmt.Sprintf("%v", this.Length) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func valueToStringCortexv2(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
-}
-func (m *WriteRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
-			}
-			m.Source = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{})
-			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1000:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.SkipLabelNameValidation = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WriteResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
-			}
-			m.Samples = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Samples |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
-			}
-			m.Histograms = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Histograms |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
-			}
-			m.Exemplars = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Exemplars |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TimeSeries) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType == 0 {
-				var v uint32
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint32(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.LabelsRefs = append(m.LabelsRefs, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.LabelsRefs) == 0 {
-					m.LabelsRefs = make([]uint32, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint32
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowCortexv2
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint32(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.LabelsRefs = append(m.LabelsRefs, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Samples = append(m.Samples, Sample{})
-			if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Histograms = append(m.Histograms, Histogram{})
-			if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Exemplars = append(m.Exemplars, Exemplar{})
-			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
-			}
-			m.CreatedTimestamp = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CreatedTimestamp |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Exemplar) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType == 0 {
-				var v uint32
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint32(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.LabelsRefs = append(m.LabelsRefs, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.LabelsRefs) == 0 {
-					m.LabelsRefs = make([]uint32, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint32
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowCortexv2
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint32(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.LabelsRefs = append(m.LabelsRefs, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
-			}
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = float64(math.Float64frombits(v))
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
-			}
-			m.Timestamp = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Timestamp |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Sample) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Sample: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Value = float64(math.Float64frombits(v))
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
-			}
-			m.Timestamp = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Timestamp |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Metadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= Metadata_MetricType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType)
-			}
-			m.HelpRef = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.HelpRef |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType)
-			}
-			m.UnitRef = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.UnitRef |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Histogram) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType)
-			}
-			var v uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Count = &Histogram_CountInt{v}
-		case 2:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))}
-		case 3:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.Sum = float64(math.Float64frombits(v))
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
-			}
-			var v int32
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
-			m.Schema = v
-		case 5:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.ZeroThreshold = float64(math.Float64frombits(v))
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType)
-			}
-			var v uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.ZeroCount = &Histogram_ZeroCountInt{v}
-		case 7:
-			if wireType != 1 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType)
-			}
-			var v uint64
-			if (iNdEx + 8) > l {
-				return io.ErrUnexpectedEOF
-			}
-			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-			iNdEx += 8
-			m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))}
-		case 8:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.NegativeSpans = append(m.NegativeSpans, BucketSpan{})
-			if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 9:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
-				m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.NegativeDeltas) == 0 {
-					m.NegativeDeltas = make([]int64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowCortexv2
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
-					m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType)
-			}
-		case 10:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				v2 := float64(math.Float64frombits(v))
-				m.NegativeCounts = append(m.NegativeCounts, v2)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.NegativeCounts) == 0 {
-					m.NegativeCounts = make([]float64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					v2 := float64(math.Float64frombits(v))
-					m.NegativeCounts = append(m.NegativeCounts, v2)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType)
-			}
-		case 11:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PositiveSpans = append(m.PositiveSpans, BucketSpan{})
-			if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 12:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
-				m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.PositiveDeltas) == 0 {
-					m.PositiveDeltas = make([]int64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowCortexv2
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
-					m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType)
-			}
-		case 13:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				v2 := float64(math.Float64frombits(v))
-				m.PositiveCounts = append(m.PositiveCounts, v2)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.PositiveCounts) == 0 {
-					m.PositiveCounts = make([]float64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					v2 := float64(math.Float64frombits(v))
-					m.PositiveCounts = append(m.PositiveCounts, v2)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType)
-			}
-		case 14:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType)
-			}
-			m.ResetHint = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 15:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
-			}
-			m.Timestamp = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Timestamp |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 16:
-			if wireType == 1 {
-				var v uint64
-				if (iNdEx + 8) > l {
-					return io.ErrUnexpectedEOF
-				}
-				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-				iNdEx += 8
-				v2 := float64(math.Float64frombits(v))
-				m.CustomValues = append(m.CustomValues, v2)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthCortexv2
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				elementCount = packedLen / 8
-				if elementCount != 0 && len(m.CustomValues) == 0 {
-					m.CustomValues = make([]float64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					if (iNdEx + 8) > l {
-						return io.ErrUnexpectedEOF
-					}
-					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
-					iNdEx += 8
-					v2 := float64(math.Float64frombits(v))
-					m.CustomValues = append(m.CustomValues, v2)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType)
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *BucketSpan) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
-			}
-			var v int32
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
-			m.Offset = v
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
-			}
-			m.Length = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Length |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipCortexv2(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthCortexv2
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipCortexv2(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowCortexv2
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowCortexv2
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthCortexv2
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthCortexv2
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowCortexv2
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipCortexv2(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthCortexv2
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthCortexv2 = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowCortexv2   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/pkg/cortexpbv2/cortexv2.proto b/pkg/cortexpbv2/cortexv2.proto
deleted file mode 100644
index c56038b9f0..0000000000
--- a/pkg/cortexpbv2/cortexv2.proto
+++ /dev/null
@@ -1,227 +0,0 @@
-syntax = "proto3";
-
-package cortexpbv2;
-
-option go_package = "cortexpbv2";
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
-message WriteRequest {
-  reserved 1 to 2;
-  enum SourceEnum {
-    API = 0;
-    RULE = 1;
-  }
-  SourceEnum Source = 3;
-  repeated string symbols = 4;
-  repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"];
-
-  bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus
-}
-
-message WriteResponse {
-  // Samples represents X-Prometheus-Remote-Write-Written-Samples
-  int64 Samples = 1;
-  // Histograms represents X-Prometheus-Remote-Write-Written-Histograms
-  int64 Histograms = 2;
-  // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
-  int64 Exemplars = 3;
-}
-
-message TimeSeries {
-  repeated uint32 labels_refs = 1;
-  // Timeseries messages can either specify samples or (native) histogram samples
-  // (histogram field), but not both. For a typical sender (real-time metric
-  // streaming), in healthy cases, there will be only one sample or histogram.
-  //
-  // Samples and histograms are sorted by timestamp (older first).
-  repeated Sample samples = 2 [(gogoproto.nullable) = false];
-  repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
-
-  // exemplars represents an optional set of exemplars attached to this series' samples.
-  repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
-
-  // metadata represents the metadata associated with the given series' samples.
-  Metadata metadata = 5 [(gogoproto.nullable) = false];
-
-  // created_timestamp represents an optional created timestamp associated with
-  // this series' samples in ms format, typically for counter or histogram type
-  // metrics. Created timestamp represents the time when the counter started
-  // counting (sometimes referred to as start timestamp), which can increase
-  // the accuracy of query results.
-  //
-  // Note that some receivers might require this and in return fail to
-  // ingest such samples within the Request.
-  //
-  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-  // for conversion from/to time.Time to Prometheus timestamp.
-  //
-  // Note that the "optional" keyword is omitted due to
-  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-  // Zero value means value not set. If you need to use exactly zero value for
-  // the timestamp, use 1 millisecond before or after.
-  int64 created_timestamp = 6;
-}
-
-// Exemplar is an additional information attached to some series' samples.
-// It is typically used to attach an example trace or request ID associated with
-// the metric changes.
-message Exemplar {
-  // labels_refs is an optional list of label name-value pair references, encoded
-  // as indices to the Request.symbols array. This list's len is always
-  // a multiple of 2, and the underlying labels should be sorted lexicographically.
-  // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
-  repeated uint32 labels_refs = 1;
-  // value represents an exact example value. This can be useful when the exemplar
-  // is attached to a histogram, which only gives an estimated value through buckets.
-  double value = 2;
-  // timestamp represents the timestamp of the exemplar in ms.
-  //
-  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-  // for conversion from/to time.Time to Prometheus timestamp.
-  int64 timestamp = 3;
-}
-
-// Sample represents series sample.
-message Sample {
-  // value of the sample.
-  double value = 1;
-  // timestamp represents timestamp of the sample in ms.
-  //
-  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-  // for conversion from/to time.Time to Prometheus timestamp.
-  int64 timestamp = 2;
-}
-
-// Metadata represents the metadata associated with the given series' samples.
-message Metadata {
-  enum MetricType {
-    METRIC_TYPE_UNSPECIFIED    = 0;
-    METRIC_TYPE_COUNTER        = 1;
-    METRIC_TYPE_GAUGE          = 2;
-    METRIC_TYPE_HISTOGRAM      = 3;
-    METRIC_TYPE_GAUGEHISTOGRAM = 4;
-    METRIC_TYPE_SUMMARY        = 5;
-    METRIC_TYPE_INFO           = 6;
-    METRIC_TYPE_STATESET       = 7;
-  }
-  MetricType type = 1;
-  // help_ref is a reference to the Request.symbols array representing help
-  // text for the metric. Help is optional, reference should point to an empty string in
-  // such a case.
-  uint32 help_ref = 3;
-  // unit_ref is a reference to the Request.symbols array representing a unit
-  // for the metric. Unit is optional, reference should point to an empty string in
-  // such a case.
-  uint32 unit_ref = 4;
-}
-
-// A native histogram, also known as a sparse histogram.
-// Original design doc:
-// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
-// The appendix of this design doc also explains the concept of float
-// histograms. This Histogram message can represent both, the usual
-// integer histogram as well as a float histogram.
-message Histogram {
-  enum ResetHint {
-    RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
-    RESET_HINT_YES     = 1; // This is the 1st histogram after a counter reset.
-    RESET_HINT_NO      = 2; // There was no counter reset between this and the previous Histogram.
-    RESET_HINT_GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
-  }
-
-  oneof count { // Count of observations in the histogram.
-    uint64 count_int   = 1;
-    double count_float = 2;
-  }
-  double sum = 3; // Sum of observations in the histogram.
-
-  // The schema defines the bucket schema. Currently, valid numbers
-  // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
-  // added in future for new bucketing layouts.
-  //
-  // The schema equal to -53 means custom buckets. See
-  // custom_values field description for more details.
-  //
-  // Values between -4 and 8 represent base-2 bucket schema, where 1
-  // is a bucket boundary in each case, and then each power of two is
-  // divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
-  // each bucket boundary is the previous boundary times 2^(2^-n).
-  sint32 schema             = 4;
-  double zero_threshold     = 5; // Breadth of the zero bucket.
-  oneof zero_count { // Count in zero bucket.
-    uint64 zero_count_int     = 6;
-    double zero_count_float   = 7;
-  }
-
-  // Negative Buckets.
-  repeated BucketSpan negative_spans =  8 [(gogoproto.nullable) = false];
-  // Use either "negative_deltas" or "negative_counts", the former for
-  // regular histograms with integer counts, the latter for
-  // float histograms.
-  repeated sint64 negative_deltas    =  9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
-  repeated double negative_counts    = 10; // Absolute count of each bucket.
-
-  // Positive Buckets.
-  //
-  // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
-  // * The span offset+length points to an the index of the custom_values array
-  // or +Inf if pointing to the len of the array.
-  // * The counts and deltas have the same meaning as for exponential histograms.
-  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
-  // Use either "positive_deltas" or "positive_counts", the former for
-  // regular histograms with integer counts, the latter for
-  // float histograms.
-  repeated sint64 positive_deltas    = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
-  repeated double positive_counts    = 13; // Absolute count of each bucket.
-
-  ResetHint reset_hint               = 14;
-  // timestamp represents timestamp of the sample in ms.
-  //
-  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
-  // for conversion from/to time.Time to Prometheus timestamp.
-  int64 timestamp = 15;
-
-  // custom_values is an additional field used by non-exponential bucketing layouts.
-  //
-  // For custom buckets (-53 schema value) custom_values specify monotonically
-  // increasing upper inclusive boundaries for the bucket counts with arbitrary
-  // widths for this histogram. In other words, custom_values represents custom,
-  // explicit bucketing that could have been converted from the classic histograms.
-  //
-  // Those bounds are then referenced by spans in positive_spans with corresponding positive
-  // counts of deltas (refer to positive_spans for more details). This way we can
-  // have encode sparse histograms with custom bucketing (many buckets are often
-  // not used).
-  //
-  // Note that for custom bounds, even negative observations are placed in the positive
-  // counts to simplify the implementation and avoid ambiguity of where to place
-  // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
-  // the zero bucket are unused, if the schema indicates custom bucketing.
-  //
-  // For each upper boundary the previous boundary represent the lower exclusive
-  // boundary for that bucket. The first element is the upper inclusive boundary
-  // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
-  // This is similar to "le" label semantics on classic histograms. You may add a
-  // bucket with an upper bound of 0 to make sure that you really have no negative
-  // observations, but in practice, native histogram rendering will show both with
-  // or without first upper boundary 0 and no negative counts as the same case.
-  //
-  // The last element is not only the upper inclusive bound of the last regular
-  // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
-  repeated double custom_values = 16;
-}
-
-// A BucketSpan defines a number of consecutive buckets with their
-// offset. Logically, it would be more straightforward to include the
-// bucket counts in the Span. However, the protobuf representation is
-// more compact in the way the data is structured here (with all the
-// buckets in a single array separate from the Spans).
-message BucketSpan {
-  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
-  uint32 length = 2; // Length of consecutive buckets.
-}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 5cd0c13e88..75a3dfc423 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -29,7 +29,6 @@ import (
 	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ha"
 	"github.com/cortexproject/cortex/pkg/ingester"
 	ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
@@ -46,7 +45,7 @@ import (
 )
 
 var (
-	emptyPreallocSeriesV2 = cortexpbv2.PreallocTimeseriesV2{}
+	emptyPreallocSeriesV2 = cortexpb.PreallocTimeseriesV2{}
 	emptyPreallocSeries   = cortexpb.PreallocTimeseries{}
 
 	supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle}
@@ -631,12 +630,12 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri
 		nil
 }
 
-func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.WriteRequest, userID string, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable, removeReplica bool) ([]uint32, []cortexpbv2.PreallocTimeseriesV2, int64, int64, int64, int64, error, error) {
+func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpb.WriteRequestV2, userID string, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseriesV2, int64, int64, int64, int64, error, error) {
 	pSpan, _ := opentracing.StartSpanFromContext(ctx, "prepareSeriesKeysV2")
 	defer pSpan.Finish()
 	// For each timeseries or samples, we compute a hash to distribute across ingesters;
 	// check each sample/metadata and discard if outside limits.
-	validatedTimeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(req.Timeseries))
+	validatedTimeseries := make([]cortexpb.PreallocTimeseriesV2, 0, len(req.Timeseries))
 	seriesKeys := make([]uint32, 0, len(req.Timeseries))
 	validatedFloatSamples := 0
 	validatedHistogramSamples := 0
@@ -659,10 +658,10 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W
 	for _, ts := range req.Timeseries {
 		// Use timestamp of latest sample in the series. If samples for series are not ordered, metric for user may be wrong.
 		if len(ts.Samples) > 0 {
-			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].Timestamp)
+			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs)
 		}
 		if len(ts.Histograms) > 0 {
-			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].Timestamp)
+			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].TimestampMs)
 		}
 
 		lbs := ts.ToLabels(&b, req.Symbols)
@@ -737,7 +736,7 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W
 			firstPartialErr = httpgrpc.Errorf(http.StatusBadRequest, validationErr.Error())
 		}
 
-		if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+		if ts.Metadata.Type != cortexpb.UNKNOWN {
 			// since metadata is attached, count only metadata that is not METRIC_TYPE_UNSPECIFIED.
 			validatedMetadata++
 		}
@@ -757,7 +756,7 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W
 	return seriesKeys, validatedTimeseries, int64(validatedMetadata), int64(validatedFloatSamples), int64(validatedHistogramSamples), int64(validatedExemplars), firstPartialErr, nil
 }
 
-func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteRequest, subRing ring.ReadRing, keys []uint32, validatedTimeseries []cortexpbv2.PreallocTimeseriesV2, userID string, stats *WriteStats) error {
+func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpb.WriteRequestV2, subRing ring.ReadRing, keys []uint32, validatedTimeseries []cortexpb.PreallocTimeseriesV2, userID string, stats *WriteStats) error {
 	span, _ := opentracing.StartSpanFromContext(ctx, "doBatchV2")
 	defer span.Finish()
 
@@ -781,7 +780,7 @@ func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteReques
 	}
 
 	return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
-		timeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(indexes))
+		timeseries := make([]cortexpb.PreallocTimeseriesV2, 0, len(indexes))
 
 		for _, i := range indexes {
 			timeseries = append(timeseries, validatedTimeseries[i])
@@ -789,12 +788,12 @@ func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteReques
 
 		return d.sendV2(localCtx, req.Symbols, ingester, timeseries, req.Source, stats)
 	}, func() {
-		cortexpbv2.ReuseSlice(req.Timeseries)
+		cortexpb.ReuseSliceV2(req.Timeseries)
 		cancel()
 	})
 }
 
-func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester ring.InstanceDesc, timeseries []cortexpbv2.PreallocTimeseriesV2, source cortexpbv2.WriteRequest_SourceEnum, stats *WriteStats) error {
+func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseriesV2, source cortexpb.SourceEnum, stats *WriteStats) error {
 	h, err := d.ingesterPool.GetClientFor(ingester.Addr)
 	if err != nil {
 		return err
@@ -807,14 +806,24 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin
 
 	c := h.(ingester_client.HealthAndIngesterClient)
 
-	req := cortexpbv2.PreallocWriteRequestV2FromPool()
+	req := cortexpb.PreallocWriteRequestV2FromPool()
 	req.Symbols = symbols
 	req.Timeseries = timeseries
 	req.Source = source
 
 	resp, err := c.PushPreAllocV2(ctx, req)
 	if err == nil {
-		cortexpbv2.ReuseWriteRequestV2(req)
+		cortexpb.ReuseWriteRequestV2(req)
+	}
+
+	if err != nil && strings.Contains(err.Error(), "unknown method PushV2") {
+		// To handle a rolling update where the distributor can handle PRW 2.0 but the Ingesters cannot yet:
+		// convert the V2 timeseries to V1 timeseries and metadata, then send a PRW 1.0 request to the Ingester.
+		v1Ts, v1Metadata, err := d.convertV2ToV1(symbols, timeseries)
+		if err != nil {
+			return err
+		}
+		return d.send(ctx, ingester, v1Ts, v1Metadata, source)
 	}
 
 	if len(timeseries) > 0 {
@@ -825,7 +834,7 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin
 
 		metadataAppend := false
 		for _, ts := range timeseries {
-			if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+			if ts.Metadata.Type != cortexpb.UNKNOWN {
 				metadataAppend = true
 				break
 			}
@@ -848,33 +857,70 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin
 	return err
 }
 
+func (d *Distributor) convertV2ToV1(symbols []string, timeseries []cortexpb.PreallocTimeseriesV2) ([]cortexpb.PreallocTimeseries, []*cortexpb.MetricMetadata, error) {
+	var v1Timeseries []cortexpb.PreallocTimeseries
+	var v1Metadata []*cortexpb.MetricMetadata
+
+	b := labels.NewScratchBuilder(0)
+	for _, v2Ts := range timeseries {
+		las := cortexpb.FromLabelsToLabelAdapters(v2Ts.ToLabels(&b, symbols))
+		v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{
+			TimeSeries: &cortexpb.TimeSeries{
+				Labels:     las,
+				Samples:    v2Ts.Samples,
+				Exemplars:  d.convertV1ToV2Exemplars(b, symbols, v2Ts.Exemplars),
+				Histograms: v2Ts.Histograms,
+			},
+		})
+		metricName, err := extract.MetricNameFromLabelAdapters(las)
+		if err != nil {
+			return nil, nil, err
+		}
+		v1Metadata = append(v1Metadata, v2Ts.Metadata.ToV1Metadata(metricName, symbols))
+	}
+
+	return v1Timeseries, v1Metadata, nil
+}
+
+func (d *Distributor) convertV1ToV2Exemplars(b labels.ScratchBuilder, symbols []string, v2Exemplars []cortexpb.ExemplarV2) []cortexpb.Exemplar {
+	v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars))
+	for _, e := range v2Exemplars {
+		v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{
+			Labels:      cortexpb.FromLabelsToLabelAdapters(e.ToLabels(&b, symbols)),
+			Value:       e.Value,
+			TimestampMs: e.Timestamp,
+		})
+	}
+	return v1Exemplars
+}
+
 // Validates a single series from a write request. Will remove labels if
 // any are configured to be dropped for the user ID.
 // Returns the validated series with it's labels/samples, and any error.
 // The returned error may retain the series labels.
-func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, seriesLabels []cortexpb.LabelAdapter, symbols []string, userID string, skipLabelNameValidation bool, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable) (cortexpbv2.PreallocTimeseriesV2, validation.ValidationError) {
+func (d *Distributor) validateSeriesV2(ts cortexpb.PreallocTimeseriesV2, seriesLabels []cortexpb.LabelAdapter, symbols []string, userID string, skipLabelNameValidation bool, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable) (cortexpb.PreallocTimeseriesV2, validation.ValidationError) {
 	d.labelsHistogram.Observe(float64(len(ts.LabelsRefs)))
 
 	if err := validation.ValidateLabels(d.validateMetrics, limits, userID, seriesLabels, skipLabelNameValidation); err != nil {
 		return emptyPreallocSeriesV2, err
 	}
 
-	var samples []cortexpbv2.Sample
+	var samples []cortexpb.Sample
 	if len(ts.Samples) > 0 {
 		// Only alloc when data present
-		samples = make([]cortexpbv2.Sample, 0, len(ts.Samples))
+		samples = make([]cortexpb.Sample, 0, len(ts.Samples))
 		for _, s := range ts.Samples {
-			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, s.Timestamp); err != nil {
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, s.TimestampMs); err != nil {
 				return emptyPreallocSeriesV2, err
 			}
 			samples = append(samples, s)
 		}
 	}
 
-	var exemplars []cortexpbv2.Exemplar
+	var exemplars []cortexpb.ExemplarV2
 	if len(ts.Exemplars) > 0 {
 		// Only alloc when data present
-		exemplars = make([]cortexpbv2.Exemplar, 0, len(ts.Exemplars))
+		exemplars = make([]cortexpb.ExemplarV2, 0, len(ts.Exemplars))
 		for _, e := range ts.Exemplars {
 			if err := validation.ValidateExemplarV2(d.validateMetrics, symbols, userID, seriesLabels, &e, b, st); err != nil {
 				// An exemplar validation error prevents ingesting samples
@@ -887,15 +933,15 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie
 		}
 	}
 
-	var histograms []cortexpbv2.Histogram
+	var histograms []cortexpb.Histogram
 	if len(ts.Histograms) > 0 {
 		// Only alloc when data present
-		histograms = make([]cortexpbv2.Histogram, 0, len(ts.Histograms))
+		histograms = make([]cortexpb.Histogram, 0, len(ts.Histograms))
 		for i, h := range ts.Histograms {
-			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, h.Timestamp); err != nil {
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, h.TimestampMs); err != nil {
 				return emptyPreallocSeriesV2, err
 			}
-			convertedHistogram, err := validation.ValidateNativeHistogramV2(d.validateMetrics, limits, userID, seriesLabels, h)
+			convertedHistogram, err := validation.ValidateNativeHistogram(d.validateMetrics, limits, userID, seriesLabels, h)
 			if err != nil {
 				return emptyPreallocSeriesV2, err
 			}
@@ -910,8 +956,8 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie
 		return emptyPreallocSeriesV2, err
 	}
 
-	return cortexpbv2.PreallocTimeseriesV2{
-		TimeSeries: &cortexpbv2.TimeSeries{
+	return cortexpb.PreallocTimeseriesV2{
+		TimeSeriesV2: &cortexpb.TimeSeriesV2{
 			LabelsRefs: ts.LabelsRefs,
 			Samples:    samples,
 			Exemplars:  exemplars,
@@ -921,7 +967,7 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie
 	}, nil
 }
 
-func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (d *Distributor) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	userID, err := tenant.TenantID(ctx)
 	if err != nil {
 		return nil, err
@@ -973,7 +1019,7 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest)
 		removeReplica, err = d.checkSample(ctx, userID, cluster, replica, limits)
 		if err != nil {
 			// Ensure the request slice is reused if the series get deduped.
-			cortexpbv2.ReuseSlice(req.Timeseries)
+			cortexpb.ReuseSliceV2(req.Timeseries)
 
 			if errors.Is(err, ha.ReplicasNotMatchError{}) {
 				// These samples have been deduped.
@@ -1009,16 +1055,16 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest)
 
 	if len(seriesKeys) == 0 {
 		// Ensure the request slice is reused if there's no series or metadata passing the validation.
-		cortexpbv2.ReuseSlice(req.Timeseries)
+		cortexpb.ReuseSliceV2(req.Timeseries)
 
-		return &cortexpbv2.WriteResponse{}, firstPartialErr
+		return &cortexpb.WriteResponseV2{}, firstPartialErr
 	}
 
 	totalSamples := validatedFloatSamples + validatedHistogramSamples
 	totalN := totalSamples + validatedExemplars + validatedMetadatas
 	if !d.ingestionRateLimiter.AllowN(now, userID, int(totalN)) {
 		// Ensure the request slice is reused if the request is rate limited.
-		cortexpbv2.ReuseSlice(req.Timeseries)
+		cortexpb.ReuseSliceV2(req.Timeseries)
 
 		d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(totalSamples))
 		d.validateMetrics.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars))
@@ -1048,7 +1094,7 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest)
 		return nil, err
 	}
 
-	resp := &cortexpbv2.WriteResponse{
+	resp := &cortexpb.WriteResponseV2{
 		Samples:    s.LoadSamples(),
 		Histograms: s.LoadHistogram(),
 		Exemplars:  s.LoadExemplars(),
@@ -1420,7 +1466,7 @@ func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) {
 	})
 }
 
-func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum) error {
+func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.SourceEnum) error {
 	h, err := d.ingesterPool.GetClientFor(ingester.Addr)
 	if err != nil {
 		return err
diff --git a/pkg/distributor/distributor_prw2_test.go b/pkg/distributor/distributor_prw2_test.go
index 80f8d92f70..77227125af 100644
--- a/pkg/distributor/distributor_prw2_test.go
+++ b/pkg/distributor/distributor_prw2_test.go
@@ -25,7 +25,6 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ingester"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
 	"github.com/cortexproject/cortex/pkg/ring"
@@ -43,7 +42,7 @@ import (
 )
 
 var (
-	emptyResponseV2 = &cortexpbv2.WriteResponse{}
+	emptyResponseV2 = &cortexpb.WriteResponseV2{}
 )
 
 func TestDistributorPRW2_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) {
@@ -97,7 +96,7 @@ func TestDistributorPRW2_Push_LabelRemoval(t *testing.T) {
 		expectedSeries labels.Labels
 		removeReplica  bool
 		removeLabels   []string
-		exemplars      []cortexpbv2.Exemplar
+		exemplars      []cortexpb.ExemplarV2
 	}
 
 	cases := []testcase{
@@ -153,7 +152,7 @@ func TestDistributorPRW2_Push_LabelRemoval(t *testing.T) {
 				{Name: "__replica__", Value: "two"},
 			},
 			expectedSeries: labels.Labels{},
-			exemplars: []cortexpbv2.Exemplar{
+			exemplars: []cortexpb.ExemplarV2{
 				{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0},
 				{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0},
 			},
@@ -223,7 +222,7 @@ func TestDistributorPRW2_PushHAInstances(t *testing.T) {
 		testReplica      string
 		cluster          string
 		samples          int
-		expectedResponse *cortexpbv2.WriteResponse
+		expectedResponse *cortexpb.WriteResponseV2
 		expectedCode     int32
 	}{
 		{
@@ -315,14 +314,14 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 
 	tests := map[string]struct {
 		prepareConfig func(limits *validation.Limits)
-		prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample)
+		prepareSeries func() ([]labels.Labels, []cortexpb.Sample)
 		expectedErr   string
 	}{
 		"all samples successfully pushed": {
 			prepareConfig: func(limits *validation.Limits) {},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -331,9 +330,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -346,9 +345,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 				limits.IngestionRate = 1
 				limits.IngestionBurstSize = 1
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -357,9 +356,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -371,9 +370,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			prepareConfig: func(limits *validation.Limits) {
 				limits.MaxLabelNamesPerSeries = 30
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -382,9 +381,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -396,9 +395,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			prepareConfig: func(limits *validation.Limits) {
 				limits.MaxLabelNameLength = 1024
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -410,9 +409,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					lbls.Set(fmt.Sprintf("xxx_%0.2000d", 1), "xxx")
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -424,9 +423,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			prepareConfig: func(limits *validation.Limits) {
 				limits.MaxLabelValueLength = 1024
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -438,9 +437,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1))
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -452,9 +451,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			prepareConfig: func(limits *validation.Limits) {
 				limits.MaxLabelsSizeBytes = 1024
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -466,9 +465,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1))
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -481,9 +480,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 				limits.RejectOldSamples = true
 				limits.RejectOldSamplesMaxAge = model.Duration(time.Hour)
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -492,9 +491,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -506,9 +505,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			prepareConfig: func(limits *validation.Limits) {
 				limits.CreationGracePeriod = model.Duration(time.Minute)
 			},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
@@ -517,9 +516,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -596,7 +595,7 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) {
 			b.ResetTimer()
 
 			for n := 0; n < b.N; n++ {
-				_, err := distributor.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+				_, err := distributor.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API))
 				if testData.expectedErr == "" && err != nil {
 					b.Fatalf("no error expected but got %v", err)
 				}
@@ -628,7 +627,7 @@ func TestDistributorPRW2_Push(t *testing.T) {
 		samples          samplesIn
 		histogramSamples bool
 		metadata         int
-		expectedResponse *cortexpbv2.WriteResponse
+		expectedResponse *cortexpb.WriteResponseV2
 		expectedError    error
 		expectedMetrics  string
 		ingesterError    error
@@ -857,7 +856,7 @@ func TestDistributorPRW2_Push(t *testing.T) {
 					errFail:          tc.ingesterError,
 				})
 
-				var request *cortexpbv2.WriteRequest
+				var request *cortexpb.WriteRequestV2
 				if !tc.histogramSamples {
 					request = makeWriteRequestV2WithSamples(tc.samples.startTimestampMs, tc.samples.num, tc.metadata)
 				} else {
@@ -966,7 +965,7 @@ func TestDistributorPRW2_PushIngestionRateLimiter(t *testing.T) {
 
 				// Push samples in multiple requests to the first distributor
 				for _, push := range testData.pushes {
-					var request *cortexpbv2.WriteRequest
+					var request *cortexpb.WriteRequestV2
 					if !enableHistogram {
 						request = makeWriteRequestV2WithSamples(0, push.samples, push.metadata)
 					} else {
@@ -1229,7 +1228,7 @@ func TestDistributorPRW2_PushInstanceLimits(t *testing.T) {
 				d.ingestionRate.Tick()
 
 				for _, push := range testData.pushes {
-					var request *cortexpbv2.WriteRequest
+					var request *cortexpb.WriteRequestV2
 					if enableHistogram {
 						request = makeWriteRequestV2WithHistogram(0, push.samples, push.metadata)
 					} else {
@@ -1401,7 +1400,7 @@ func TestDistributorPRW2_PushQuery(t *testing.T) {
 
 			request := makeWriteRequestV2WithSamples(0, tc.samples, tc.metadata)
 			writeResponse, err := ds[0].PushV2(ctx, request)
-			assert.Equal(t, &cortexpbv2.WriteResponse{}, writeResponse)
+			assert.Equal(t, &cortexpb.WriteResponseV2{}, writeResponse)
 			assert.Nil(t, err)
 
 			var response model.Matrix
@@ -1451,7 +1450,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs
 		// Push a number of series below the max chunks limit. Each series has 1 sample,
 		// so expect 1 chunk per series when querying back.
 		initialSeries := maxChunksLimit / 3
-		var writeReqV2 *cortexpbv2.WriteRequest
+		var writeReqV2 *cortexpb.WriteRequestV2
 		if histogram {
 			writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0)
 		} else {
@@ -1459,7 +1458,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs
 		}
 
 		writeRes, err := ds[0].PushV2(ctx, writeReqV2)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		allSeriesMatchers := []*labels.Matcher{
@@ -1475,13 +1474,13 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs
 		// Push more series to exceed the limit once we'll query back all series.
 
 		for i := 0; i < maxChunksLimit; i++ {
-			writeReq := &cortexpbv2.WriteRequest{}
+			writeReq := &cortexpb.WriteRequestV2{}
 			writeReq.Symbols = []string{"", "__name__", fmt.Sprintf("another_series_%d", i)}
 			writeReq.Timeseries = append(writeReq.Timeseries,
 				makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram, false),
 			)
 			writeRes, err := ds[0].PushV2(ctx, writeReq)
-			assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+			assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 			assert.Nil(t, err)
 		}
 
@@ -1514,7 +1513,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs
 
 		// Push a number of series below the max series limit.
 		initialSeries := maxSeriesLimit
-		var writeReqV2 *cortexpbv2.WriteRequest
+		var writeReqV2 *cortexpb.WriteRequestV2
 		if histogram {
 			writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0)
 		} else {
@@ -1522,7 +1521,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs
 		}
 
 		writeRes, err := ds[0].PushV2(ctx, writeReqV2)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		allSeriesMatchers := []*labels.Matcher{
@@ -1536,14 +1535,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs
 		assert.Len(t, queryRes.Chunkseries, initialSeries)
 
 		// Push more series to exceed the limit once we'll query back all series.
-		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq := &cortexpb.WriteRequestV2{}
 		writeReq.Symbols = []string{"", "__name__", "another_series"}
 		writeReq.Timeseries = append(writeReq.Timeseries,
 			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
 		)
 
 		writeRes, err = ds[0].PushV2(ctx, writeReq)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		// Since the number of series is exceeding the limit, we expect
@@ -1579,13 +1578,13 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim
 			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
 		}
 		// Push a single series to allow us to calculate the chunk size to calculate the limit for the test.
-		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq := &cortexpb.WriteRequestV2{}
 		writeReq.Symbols = []string{"", "__name__", "another_series"}
 		writeReq.Timeseries = append(writeReq.Timeseries,
 			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
 		)
 		writeRes, err := ds[0].PushV2(ctx, writeReq)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 		chunkSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
 		require.NoError(t, err)
@@ -1598,7 +1597,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim
 		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0, 0))
 
 		// Push a number of series below the max chunk bytes limit. Subtract one for the series added above.
-		var writeReqV2 *cortexpbv2.WriteRequest
+		var writeReqV2 *cortexpb.WriteRequestV2
 		if histogram {
 			writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0)
 		} else {
@@ -1606,7 +1605,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim
 		}
 
 		writeRes, err = ds[0].PushV2(ctx, writeReqV2)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		// Since the number of chunk bytes is equal to the limit (but doesn't
@@ -1616,14 +1615,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim
 		assert.Len(t, queryRes.Chunkseries, seriesToAdd)
 
 		// Push another series to exceed the chunk bytes limit once we'll query back all series.
-		writeReq = &cortexpbv2.WriteRequest{}
+		writeReq = &cortexpb.WriteRequestV2{}
 		writeReq.Symbols = []string{"", "__name__", "another_series_1"}
 		writeReq.Timeseries = append(writeReq.Timeseries,
 			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false),
 		)
 
 		writeRes, err = ds[0].PushV2(ctx, writeReq)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		// Since the aggregated chunk size is exceeding the limit, we expect
@@ -1659,14 +1658,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi
 			labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"),
 		}
 		// Push a single series to allow us to calculate the label size to calculate the limit for the test.
-		writeReq := &cortexpbv2.WriteRequest{}
+		writeReq := &cortexpb.WriteRequestV2{}
 		writeReq.Symbols = []string{"", "__name__", "another_series"}
 		writeReq.Timeseries = append(writeReq.Timeseries,
 			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false),
 		)
 
 		writeRes, err := ds[0].PushV2(ctx, writeReq)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 		dataSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...)
 		require.NoError(t, err)
@@ -1679,7 +1678,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi
 		ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit))
 
 		// Push a number of series below the max chunk bytes limit. Subtract one for the series added above.
-		var writeReqV2 *cortexpbv2.WriteRequest
+		var writeReqV2 *cortexpb.WriteRequestV2
 		if histogram {
 			writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0)
 		} else {
@@ -1687,7 +1686,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi
 		}
 
 		writeRes, err = ds[0].PushV2(ctx, writeReqV2)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		// Since the number of chunk bytes is equal to the limit (but doesn't
@@ -1697,14 +1696,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi
 		assert.Len(t, queryRes.Chunkseries, seriesToAdd)
 
 		// Push another series to exceed the chunk bytes limit once we'll query back all series.
-		writeReq = &cortexpbv2.WriteRequest{}
+		writeReq = &cortexpb.WriteRequestV2{}
 		writeReq.Symbols = []string{"", "__name__", "another_series_1"}
 		writeReq.Timeseries = append(writeReq.Timeseries,
 			makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false),
 		)
 
 		writeRes, err = ds[0].PushV2(ctx, writeReq)
-		assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes)
+		assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes)
 		assert.Nil(t, err)
 
 		// Since the aggregated chunk size is exceeding the limit, we expect
@@ -1824,8 +1823,8 @@ func TestDistributorPRW2_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime
 	}
 }
 
-func makeWriteRequestV2WithSamples(startTimestampMs int64, samples int, metadata int) *cortexpbv2.WriteRequest {
-	request := &cortexpbv2.WriteRequest{}
+func makeWriteRequestV2WithSamples(startTimestampMs int64, samples int, metadata int) *cortexpb.WriteRequestV2 {
+	request := &cortexpb.WriteRequestV2{}
 	st := writev2.NewSymbolTable()
 	st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil)
 
@@ -1916,7 +1915,7 @@ func TestDistributorPRW2_Push_ExemplarValidation(t *testing.T) {
 	}
 
 	tests := map[string]struct {
-		req    *cortexpbv2.WriteRequest
+		req    *cortexpb.WriteRequestV2
 		errMsg string
 	}{
 		"valid exemplar": {
@@ -2207,16 +2206,16 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) {
 
 	tests := map[string]struct {
 		prepareConfig func(limits *validation.Limits)
-		prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample)
+		prepareSeries func() ([]labels.Labels, []cortexpb.Sample)
 		matchers      []*labels.Matcher
 		queryLimiter  *limiter.QueryLimiter
 		expectedErr   error
 	}{
 		"get series within limits": {
 			prepareConfig: func(limits *validation.Limits) {},
-			prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) {
+			prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) {
 				metrics := make([]labels.Labels, numSeriesPerRequest)
-				samples := make([]cortexpbv2.Sample, numSeriesPerRequest)
+				samples := make([]cortexpb.Sample, numSeriesPerRequest)
 
 				for i := 0; i < numSeriesPerRequest; i++ {
 					lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: fmt.Sprintf("foo_%d", i)}})
@@ -2225,9 +2224,9 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) {
 					}
 
 					metrics[i] = lbls.Labels()
-					samples[i] = cortexpbv2.Sample{
-						Value:     float64(i),
-						Timestamp: time.Now().UnixNano() / int64(time.Millisecond),
+					samples[i] = cortexpb.Sample{
+						Value:       float64(i),
+						TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
 					}
 				}
 
@@ -2260,7 +2259,7 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) {
 			// Prepare the series to remote write before starting the benchmark.
 			metrics, samples := testData.prepareSeries()
 
-			if _, err := ds[0].PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)); err != nil {
+			if _, err := ds[0].PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)); err != nil {
 				b.Fatalf("error pushing to distributor %v", err)
 			}
 
@@ -2351,8 +2350,8 @@ func TestDistributorPRW2_MetricsMetadata(t *testing.T) {
 	}
 }
 
-func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, metadata int) *cortexpbv2.WriteRequest {
-	request := &cortexpbv2.WriteRequest{}
+func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, metadata int) *cortexpb.WriteRequestV2 {
+	request := &cortexpb.WriteRequestV2{}
 	st := writev2.NewSymbolTable()
 	st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil)
 
@@ -2375,48 +2374,48 @@ func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, meta
 	return request
 }
 
-func makeMetadataV2FromST(value int, st *writev2.SymbolsTable) cortexpbv2.PreallocTimeseriesV2 {
-	t := cortexpbv2.PreallocTimeseriesV2{
-		TimeSeries: &cortexpbv2.TimeSeries{
+func makeMetadataV2FromST(value int, st *writev2.SymbolsTable) cortexpb.PreallocTimeseriesV2 {
+	t := cortexpb.PreallocTimeseriesV2{
+		TimeSeriesV2: &cortexpb.TimeSeriesV2{
 			LabelsRefs: []uint32{1, 2},
 		},
 	}
 	helpRef := st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
-	t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+	t.Metadata.Type = cortexpb.COUNTER
 	t.Metadata.HelpRef = helpRef
 
 	return t
 }
 
-func makeTimeseriesV2FromST(labels []cortexpb.LabelAdapter, st *writev2.SymbolsTable, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 {
+func makeTimeseriesV2FromST(labels []cortexpb.LabelAdapter, st *writev2.SymbolsTable, ts int64, value int, histogram bool, metadata bool) cortexpb.PreallocTimeseriesV2 {
 	var helpRef uint32
 	if metadata {
 		helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
 	}
 
-	t := cortexpbv2.PreallocTimeseriesV2{
-		TimeSeries: &cortexpbv2.TimeSeries{
+	t := cortexpb.PreallocTimeseriesV2{
+		TimeSeriesV2: &cortexpb.TimeSeriesV2{
 			LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil),
 		},
 	}
 	if metadata {
-		t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+		t.Metadata.Type = cortexpb.COUNTER
 		t.Metadata.HelpRef = helpRef
 	}
 
 	if histogram {
-		t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
+		t.Histograms = append(t.Histograms, cortexpb.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
 	} else {
-		t.Samples = append(t.Samples, cortexpbv2.Sample{
-			Timestamp: ts,
-			Value:     float64(value),
+		t.Samples = append(t.Samples, cortexpb.Sample{
+			TimestampMs: ts,
+			Value:       float64(value),
 		})
 	}
 
 	return t
 }
 
-func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 {
+func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool, metadata bool) cortexpb.PreallocTimeseriesV2 {
 	st := writev2.NewSymbolTable()
 	st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil)
 
@@ -2425,29 +2424,29 @@ func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, valu
 		helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value))
 	}
 
-	t := cortexpbv2.PreallocTimeseriesV2{
-		TimeSeries: &cortexpbv2.TimeSeries{
+	t := cortexpb.PreallocTimeseriesV2{
+		TimeSeriesV2: &cortexpb.TimeSeriesV2{
 			LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil),
 		},
 	}
 	if metadata {
-		t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER
+		t.Metadata.Type = cortexpb.COUNTER
 		t.Metadata.HelpRef = helpRef
 	}
 
 	if histogram {
-		t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
+		t.Histograms = append(t.Histograms, cortexpb.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value)))
 	} else {
-		t.Samples = append(t.Samples, cortexpbv2.Sample{
-			Timestamp: ts,
-			Value:     float64(value),
+		t.Samples = append(t.Samples, cortexpb.Sample{
+			TimestampMs: ts,
+			Value:       float64(value),
 		})
 	}
 
 	return t
 }
 
-func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplarLabels []string) *cortexpbv2.WriteRequest {
+func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplarLabels []string) *cortexpb.WriteRequestV2 {
 	st := writev2.NewSymbolTable()
 	for _, l := range seriesLabels {
 		st.Symbolize(l)
@@ -2456,15 +2455,15 @@ func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplar
 		st.Symbolize(l)
 	}
 
-	return &cortexpbv2.WriteRequest{
+	return &cortexpb.WriteRequestV2{
 		Symbols: st.Symbols(),
-		Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+		Timeseries: []cortexpb.PreallocTimeseriesV2{
 			{
-				TimeSeries: &cortexpbv2.TimeSeries{
-					LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(seriesLabels...))),
-					Exemplars: []cortexpbv2.Exemplar{
+				TimeSeriesV2: &cortexpb.TimeSeriesV2{
+					LabelsRefs: cortexpb.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(seriesLabels...))),
+					Exemplars: []cortexpb.ExemplarV2{
 						{
-							LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(exemplarLabels...))),
+							LabelsRefs: cortexpb.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(exemplarLabels...))),
 							Timestamp:  timestamp,
 						},
 					},
@@ -2474,47 +2473,47 @@ func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplar
 	}
 }
 
-func mockWriteRequestV2(lbls []labels.Labels, value int64, timestamp int64, histogram bool) *cortexpbv2.WriteRequest {
+func mockWriteRequestV2(lbls []labels.Labels, value int64, timestamp int64, histogram bool) *cortexpb.WriteRequestV2 {
 	var (
-		samples    []cortexpbv2.Sample
-		histograms []cortexpbv2.Histogram
+		samples    []cortexpb.Sample
+		histograms []cortexpb.Histogram
 	)
 	if histogram {
-		histograms = make([]cortexpbv2.Histogram, len(lbls))
+		histograms = make([]cortexpb.Histogram, len(lbls))
 		for i := range lbls {
-			histograms[i] = cortexpbv2.HistogramToHistogramProto(timestamp, tsdbutil.GenerateTestHistogram(int(value)))
+			histograms[i] = cortexpb.HistogramToHistogramProto(timestamp, tsdbutil.GenerateTestHistogram(int(value)))
 		}
 	} else {
-		samples = make([]cortexpbv2.Sample, len(lbls))
+		samples = make([]cortexpb.Sample, len(lbls))
 		for i := range lbls {
-			samples[i] = cortexpbv2.Sample{
-				Timestamp: timestamp,
-				Value:     float64(value),
+			samples[i] = cortexpb.Sample{
+				TimestampMs: timestamp,
+				Value:       float64(value),
 			}
 		}
 	}
 
-	return cortexpbv2.ToWriteRequestV2(lbls, samples, histograms, nil, cortexpbv2.API)
+	return cortexpb.ToWriteRequestV2(lbls, samples, histograms, nil, cortexpb.API)
 }
 
-func makeWriteRequestHAV2(samples int, replica, cluster string, histogram bool) *cortexpbv2.WriteRequest {
-	request := &cortexpbv2.WriteRequest{}
+func makeWriteRequestHAV2(samples int, replica, cluster string, histogram bool) *cortexpb.WriteRequestV2 {
+	request := &cortexpb.WriteRequestV2{}
 	st := writev2.NewSymbolTable()
 	for i := 0; i < samples; i++ {
-		ts := cortexpbv2.PreallocTimeseriesV2{
-			TimeSeries: &cortexpbv2.TimeSeries{
+		ts := cortexpb.PreallocTimeseriesV2{
+			TimeSeriesV2: &cortexpb.TimeSeriesV2{
 				LabelsRefs: st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "__replica__", Value: replica}, {Name: "bar", Value: "baz"}, {Name: "cluster", Value: cluster}, {Name: "sample", Value: fmt.Sprintf("%d", i)}}, nil),
 			},
 		}
 		if histogram {
-			ts.Histograms = []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(int64(i), tsdbutil.GenerateTestHistogram(i)),
+			ts.Histograms = []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(int64(i), tsdbutil.GenerateTestHistogram(i)),
 			}
 		} else {
-			ts.Samples = []cortexpbv2.Sample{
+			ts.Samples = []cortexpb.Sample{
 				{
-					Value:     float64(i),
-					Timestamp: int64(i),
+					Value:       float64(i),
+					TimestampMs: int64(i),
 				},
 			}
 		}
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 68d3353cf8..4bcb9ad0ac 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -35,7 +35,6 @@ import (
 
 	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ha"
 	"github.com/cortexproject/cortex/pkg/ingester"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
@@ -3070,11 +3069,11 @@ func (i *mockIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr
 	return i.Push(ctx, &in.WriteRequest, opts...)
 }
 
-func (i *mockIngester) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
-	return i.PushV2(ctx, &in.WriteRequest, opts...)
+func (i *mockIngester) PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
+	return i.PushV2(ctx, &in.WriteRequestV2, opts...)
 }
 
-func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+func (i *mockIngester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
 	i.Lock()
 	defer i.Unlock()
 
@@ -3108,7 +3107,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest,
 		for _, s := range series.Samples {
 			v1Sample = append(v1Sample, cortexpb.Sample{
 				Value:       s.Value,
-				TimestampMs: s.Timestamp,
+				TimestampMs: s.TimestampMs,
 			})
 		}
 		if !ok {
@@ -3126,7 +3125,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest,
 			existing.Samples = append(existing.Samples, v1Sample...)
 		}
 
-		if series.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+		if series.Metadata.Type != cortexpb.UNKNOWN {
 			m := series.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols)
 			hash = shardByMetricName(orgid, m.MetricFamilyName)
 			set, ok := i.metadata[hash]
@@ -3138,7 +3137,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest,
 		}
 	}
 
-	return &cortexpbv2.WriteResponse{}, nil
+	return &cortexpb.WriteResponseV2{}, nil
 }
 
 func (i *mockIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
@@ -3391,7 +3390,7 @@ func (i *noopIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr
 	return nil, nil
 }
 
-func (i *noopIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
+func (i *noopIngester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
 	return nil, nil
 }
 
diff --git a/pkg/distributor/distributorpb/distributor.pb.go b/pkg/distributor/distributorpb/distributor.pb.go
index 5946061f32..b522283831 100644
--- a/pkg/distributor/distributorpb/distributor.pb.go
+++ b/pkg/distributor/distributorpb/distributor.pb.go
@@ -7,7 +7,6 @@ import (
 	context "context"
 	fmt "fmt"
 	cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
-	cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	grpc "google.golang.org/grpc"
@@ -30,23 +29,22 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 func init() { proto.RegisterFile("distributor.proto", fileDescriptor_c518e33639ca565d) }
 
 var fileDescriptor_c518e33639ca565d = []byte{
-	// 245 bytes of a gzipped FileDescriptorProto
+	// 236 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xc9, 0x2c, 0x2e,
 	0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46,
 	0x12, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xeb, 0x83, 0x58, 0x10, 0x25, 0x52, 0x96,
 	0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xc9, 0xf9, 0x45, 0x25, 0xa9,
 	0x15, 0x05, 0x45, 0xf9, 0x59, 0xa9, 0xc9, 0x25, 0x50, 0x9e, 0x7e, 0x41, 0x76, 0x3a, 0x4c, 0x22,
-	0x09, 0xca, 0x80, 0x6a, 0xb5, 0x25, 0x45, 0x6b, 0x99, 0x11, 0x94, 0x59, 0x66, 0x04, 0xd1, 0x6e,
-	0xd4, 0xc9, 0xc8, 0xc5, 0xed, 0x82, 0x70, 0x9f, 0x90, 0x25, 0x17, 0x4b, 0x40, 0x69, 0x71, 0x86,
-	0x90, 0x98, 0x1e, 0x4c, 0x8f, 0x5e, 0x78, 0x51, 0x66, 0x49, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a,
-	0x71, 0x89, 0x94, 0x38, 0x86, 0x78, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x90, 0x3d,
-	0x17, 0x1b, 0x48, 0x6b, 0x98, 0x91, 0x90, 0x84, 0x1e, 0xc2, 0x42, 0x54, 0xed, 0x92, 0x58, 0x64,
-	0x60, 0x06, 0x38, 0x39, 0x5f, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72,
-	0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23,
-	0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2,
-	0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x45, 0x0a, 0xde,
-	0x82, 0xa4, 0x24, 0x36, 0xb0, 0xbf, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x56, 0x3b,
-	0xcc, 0x89, 0x01, 0x00, 0x00,
+	0x09, 0xca, 0x80, 0x68, 0x35, 0xea, 0x64, 0xe4, 0xe2, 0x76, 0x41, 0x58, 0x20, 0x64, 0xc9, 0xc5,
+	0x12, 0x50, 0x5a, 0x9c, 0x21, 0x24, 0xa6, 0x07, 0x53, 0xaf, 0x17, 0x5e, 0x94, 0x59, 0x92, 0x1a,
+	0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, 0x8e, 0x21, 0x5e, 0x5c, 0x90, 0x9f, 0x57, 0x9c,
+	0xaa, 0xc4, 0x20, 0x64, 0xcf, 0xc5, 0x06, 0xd2, 0x1a, 0x66, 0x24, 0x24, 0x81, 0x5d, 0x73, 0x98,
+	0x91, 0x94, 0x24, 0x0e, 0xed, 0x61, 0x46, 0x4a, 0x0c, 0x4e, 0xce, 0x17, 0x1e, 0xca, 0x31, 0xdc,
+	0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18,
+	0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4,
+	0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f,
+	0xe5, 0x18, 0xa2, 0x78, 0x91, 0xc2, 0xa7, 0x20, 0x29, 0x89, 0x0d, 0xec, 0x2f, 0x63, 0x40, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0xce, 0xdc, 0x64, 0xe8, 0x4a, 0x01, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -62,7 +60,7 @@ const _ = grpc.SupportPackageIsVersion4
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type DistributorClient interface {
 	Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
-	PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
+	PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error)
 }
 
 type distributorClient struct {
@@ -82,8 +80,8 @@ func (c *distributorClient) Push(ctx context.Context, in *cortexpb.WriteRequest,
 	return out, nil
 }
 
-func (c *distributorClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
-	out := new(cortexpbv2.WriteResponse)
+func (c *distributorClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
+	out := new(cortexpb.WriteResponseV2)
 	err := c.cc.Invoke(ctx, "/distributor.Distributor/PushV2", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -94,7 +92,7 @@ func (c *distributorClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequ
 // DistributorServer is the server API for Distributor service.
 type DistributorServer interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
-	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+	PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error)
 }
 
 // UnimplementedDistributorServer can be embedded to have forward compatible implementations.
@@ -104,7 +102,7 @@ type UnimplementedDistributorServer struct {
 func (*UnimplementedDistributorServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method Push not implemented")
 }
-func (*UnimplementedDistributorServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (*UnimplementedDistributorServer) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented")
 }
 
@@ -131,7 +129,7 @@ func _Distributor_Push_Handler(srv interface{}, ctx context.Context, dec func(in
 }
 
 func _Distributor_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(cortexpbv2.WriteRequest)
+	in := new(cortexpb.WriteRequestV2)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
@@ -143,7 +141,7 @@ func _Distributor_PushV2_Handler(srv interface{}, ctx context.Context, dec func(
 		FullMethod: "/distributor.Distributor/PushV2",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(DistributorServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest))
+		return srv.(DistributorServer).PushV2(ctx, req.(*cortexpb.WriteRequestV2))
 	}
 	return interceptor(ctx, in, info, handler)
 }
diff --git a/pkg/distributor/distributorpb/distributor.proto b/pkg/distributor/distributorpb/distributor.proto
index 93234f1abb..7f594c1a5d 100644
--- a/pkg/distributor/distributorpb/distributor.proto
+++ b/pkg/distributor/distributorpb/distributor.proto
@@ -6,12 +6,11 @@ option go_package = "distributorpb";
 
 import "gogoproto/gogo.proto";
 import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto";
-import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service Distributor {
   rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
-  rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {};
+  rpc PushV2(cortexpb.WriteRequestV2) returns (cortexpb.WriteResponseV2) {};
 }
diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go
index b8b3ddefe9..dfeda83ecb 100644
--- a/pkg/ingester/client/client.go
+++ b/pkg/ingester/client/client.go
@@ -5,7 +5,6 @@ import (
 	"flag"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util/grpcclient"
 	"github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock"
 
@@ -44,7 +43,7 @@ type HealthAndIngesterClient interface {
 	grpc_health_v1.HealthClient
 	Close() error
 	PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
-	PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
+	PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error)
 }
 
 type closableHealthAndIngesterClient struct {
@@ -57,9 +56,9 @@ type closableHealthAndIngesterClient struct {
 	inflightPushRequests    *prometheus.GaugeVec
 }
 
-func (c *closableHealthAndIngesterClient) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
-	return c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) {
-		out := new(cortexpbv2.WriteResponse)
+func (c *closableHealthAndIngesterClient) PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
+	return c.handlePushRequestV2(func() (*cortexpb.WriteResponseV2, error) {
+		out := new(cortexpb.WriteResponseV2)
 		err := c.conn.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...)
 		if err != nil {
 			return nil, err
@@ -85,13 +84,13 @@ func (c *closableHealthAndIngesterClient) Push(ctx context.Context, in *cortexpb
 	})
 }
 
-func (c *closableHealthAndIngesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
-	return c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) {
+func (c *closableHealthAndIngesterClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
+	return c.handlePushRequestV2(func() (*cortexpb.WriteResponseV2, error) {
 		return c.IngesterClient.PushV2(ctx, in, opts...)
 	})
 }
 
-func (c *closableHealthAndIngesterClient) handlePushRequestV2(mainFunc func() (*cortexpbv2.WriteResponse, error)) (*cortexpbv2.WriteResponse, error) {
+func (c *closableHealthAndIngesterClient) handlePushRequestV2(mainFunc func() (*cortexpb.WriteResponseV2, error)) (*cortexpb.WriteResponseV2, error) {
 	currentInflight := c.inflightRequests.Inc()
 	c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(currentInflight))
 	defer func() {
diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go
index e587962642..cb808f63e2 100644
--- a/pkg/ingester/client/ingester.pb.go
+++ b/pkg/ingester/client/ingester.pb.go
@@ -12,7 +12,6 @@ import (
 	fmt "fmt"
 	cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
 	github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb"
-	cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	grpc "google.golang.org/grpc"
@@ -1485,93 +1484,93 @@ func init() {
 func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) }
 
 var fileDescriptor_60f6df4f3586b478 = []byte{
-	// 1368 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0x14, 0x55,
-	0x14, 0x9e, 0xce, 0x3c, 0x98, 0x39, 0xf3, 0x60, 0x72, 0x13, 0xc8, 0xa4, 0x91, 0x0e, 0x34, 0x85,
-	0xa6, 0x54, 0x26, 0x30, 0x6a, 0x15, 0xa8, 0x48, 0x25, 0x10, 0x20, 0x40, 0x08, 0x74, 0x02, 0x5a,
-	0x96, 0x56, 0x57, 0x67, 0xe6, 0x92, 0xb4, 0xf4, 0x63, 0xe8, 0x7b, 0x27, 0x05, 0xae, 0xb4, 0xfc,
-	0x01, 0xba, 0x74, 0xeb, 0x4a, 0x7f, 0x80, 0x3f, 0x82, 0x25, 0x0b, 0x17, 0x94, 0x0b, 0x4a, 0x86,
-	0x8d, 0x4b, 0xfc, 0x07, 0x56, 0xdf, 0x47, 0xbf, 0x32, 0x79, 0x60, 0x81, 0xbb, 0xbe, 0xe7, 0x7c,
-	0xe7, 0xdc, 0x73, 0xbf, 0x7b, 0xce, 0x3d, 0x67, 0x06, 0x1a, 0xb6, 0xb7, 0x81, 0x09, 0xc5, 0x41,
-	0xbb, 0x1f, 0xf8, 0xd4, 0x47, 0xa5, 0xae, 0x1f, 0x50, 0xfc, 0x50, 0x9d, 0xdc, 0xf0, 0x37, 0x7c,
-	0x26, 0x9a, 0x0b, 0xbf, 0xb8, 0x56, 0x3d, 0xb7, 0x61, 0xd3, 0xcd, 0xc1, 0x7a, 0xbb, 0xeb, 0xbb,
-	0x73, 0x1c, 0xd8, 0x0f, 0xfc, 0x6f, 0x70, 0x97, 0x8a, 0xd5, 0x5c, 0xff, 0xfe, 0x86, 0x54, 0xac,
-	0x8b, 0x0f, 0x61, 0x7a, 0xfe, 0x55, 0x4c, 0xb7, 0x3a, 0xe2, 0x73, 0xab, 0xc3, 0xcd, 0xf5, 0xf3,
-	0x50, 0x35, 0xb0, 0xd5, 0x33, 0xf0, 0x83, 0x01, 0x26, 0x14, 0xb5, 0xe1, 0xc0, 0x83, 0x01, 0x0e,
-	0x6c, 0x4c, 0x5a, 0xca, 0xb1, 0xfc, 0x6c, 0xb5, 0x33, 0xd9, 0x16, 0xbb, 0xdd, 0x1e, 0xe0, 0xe0,
-	0x91, 0x80, 0x19, 0x12, 0xa4, 0x5f, 0x80, 0x1a, 0x37, 0x27, 0x7d, 0xdf, 0x23, 0x18, 0xcd, 0xc1,
-	0x81, 0x00, 0x93, 0x81, 0x43, 0xa5, 0xfd, 0xa1, 0x8c, 0x3d, 0xc7, 0x19, 0x12, 0xa5, 0x5f, 0x87,
-	0x7a, 0x4a, 0x83, 0x3e, 0x06, 0xa0, 0xb6, 0x8b, 0xc9, 0xa8, 0x20, 0xfa, 0xeb, 0xed, 0x35, 0xdb,
-	0xc5, 0xab, 0x4c, 0xb7, 0x50, 0x78, 0xfc, 0x6c, 0x26, 0x67, 0x24, 0xd0, 0xfa, 0xcf, 0x0a, 0xd4,
-	0x92, 0x71, 0xa2, 0xf7, 0x01, 0x11, 0x6a, 0x05, 0xd4, 0x64, 0x20, 0x6a, 0xb9, 0x7d, 0xd3, 0x0d,
-	0x9d, 0x2a, 0xb3, 0x79, 0xa3, 0xc9, 0x34, 0x6b, 0x52, 0xb1, 0x4c, 0xd0, 0x2c, 0x34, 0xb1, 0xd7,
-	0x4b, 0x63, 0xc7, 0x18, 0xb6, 0x81, 0xbd, 0x5e, 0x12, 0x79, 0x1a, 0xca, 0xae, 0x45, 0xbb, 0x9b,
-	0x38, 0x20, 0xad, 0x7c, 0x9a, 0xa7, 0x1b, 0xd6, 0x3a, 0x76, 0x96, 0xb9, 0xd2, 0x88, 0x50, 0xfa,
-	0x2f, 0x0a, 0x4c, 0x2e, 0x3e, 0xc4, 0x6e, 0xdf, 0xb1, 0x82, 0xff, 0x25, 0xc4, 0x33, 0xdb, 0x42,
-	0x3c, 0x34, 0x2a, 0x44, 0x92, 0x88, 0xf1, 0x2b, 0x98, 0x60, 0xa1, 0xad, 0xd2, 0x00, 0x5b, 0x6e,
-	0x74, 0x23, 0x17, 0xa0, 0xda, 0xdd, 0x1c, 0x78, 0xf7, 0x53, 0x57, 0x32, 0x25, 0x9d, 0xc5, 0x17,
-	0x72, 0x31, 0x04, 0x89, 0x5b, 0x49, 0x5a, 0x5c, 0x2b, 0x94, 0xc7, 0x9a, 0x79, 0x7d, 0x15, 0x0e,
-	0x65, 0x08, 0x78, 0x0d, 0x37, 0xfe, 0x87, 0x02, 0x88, 0x1d, 0xe7, 0xae, 0xe5, 0x0c, 0x30, 0x91,
-	0xa4, 0x1e, 0x05, 0x70, 0x42, 0xa9, 0xe9, 0x59, 0x2e, 0x66, 0x64, 0x56, 0x8c, 0x0a, 0x93, 0xdc,
-	0xb4, 0x5c, 0xbc, 0x03, 0xe7, 0x63, 0xaf, 0xc0, 0x79, 0x7e, 0x4f, 0xce, 0x0b, 0xc7, 0x94, 0x7d,
-	0x70, 0x8e, 0x26, 0xa1, 0xe8, 0xd8, 0xae, 0x4d, 0x5b, 0x45, 0xe6, 0x91, 0x2f, 0xf4, 0xb3, 0x30,
-	0x91, 0x3a, 0x95, 0x60, 0xea, 0x38, 0xd4, 0xf8, 0xb1, 0xb6, 0x98, 0x9c, 0x71, 0x55, 0x31, 0xaa,
-	0x4e, 0x0c, 0xd5, 0x3f, 0x83, 0xe9, 0x84, 0x65, 0xe6, 0x26, 0xf7, 0x61, 0xff, 0xbb, 0x02, 0xe3,
-	0x37, 0x24, 0x51, 0xe4, 0x4d, 0x27, 0x69, 0x74, 0xfa, 0x7c, 0xe2, 0xf4, 0xff, 0x81, 0x46, 0xfd,
-	0x23, 0x91, 0x06, 0x22, 0x6a, 0x71, 0xde, 0x19, 0xa8, 0xc6, 0x69, 0x20, 0x8f, 0x0b, 0x51, 0x1e,
-	0x10, 0xfd, 0x13, 0x68, 0xc5, 0x66, 0x19, 0xb2, 0xf6, 0x34, 0x46, 0xd0, 0xbc, 0x43, 0x70, 0xb0,
-	0x4a, 0x2d, 0x2a, 0x89, 0xd2, 0xbf, 0x1f, 0x83, 0xf1, 0x84, 0x50, 0xb8, 0x3a, 0x29, 0xdb, 0x81,
-	0xed, 0x7b, 0x66, 0x60, 0x51, 0x9e, 0x92, 0x8a, 0x51, 0x8f, 0xa4, 0x86, 0x45, 0x71, 0x98, 0xb5,
-	0xde, 0xc0, 0x35, 0x45, 0x21, 0x84, 0x8c, 0x15, 0x8c, 0x8a, 0x37, 0x70, 0x79, 0xf6, 0x87, 0x97,
-	0x60, 0xf5, 0x6d, 0x33, 0xe3, 0x29, 0xcf, 0x3c, 0x35, 0xad, 0xbe, 0xbd, 0x94, 0x72, 0xd6, 0x86,
-	0x89, 0x60, 0xe0, 0xe0, 0x2c, 0xbc, 0xc0, 0xe0, 0xe3, 0xa1, 0x2a, 0x8d, 0x3f, 0x01, 0x75, 0xab,
-	0x4b, 0xed, 0x2d, 0x2c, 0xf7, 0x2f, 0xb2, 0xfd, 0x6b, 0x5c, 0x28, 0x42, 0x38, 0x01, 0x75, 0xc7,
-	0xb7, 0x7a, 0xb8, 0x67, 0xae, 0x3b, 0x7e, 0xf7, 0x3e, 0x69, 0x95, 0x38, 0x88, 0x0b, 0x17, 0x98,
-	0x4c, 0xff, 0x1a, 0x26, 0x42, 0x0a, 0x96, 0x2e, 0xa5, 0x49, 0x98, 0x82, 0x03, 0x03, 0x82, 0x03,
-	0xd3, 0xee, 0x89, 0x82, 0x2c, 0x85, 0xcb, 0xa5, 0x1e, 0x3a, 0x05, 0x85, 0x9e, 0x45, 0x2d, 0x76,
-	0xe0, 0x6a, 0x67, 0x5a, 0x5e, 0xf5, 0x36, 0x1a, 0x0d, 0x06, 0xd3, 0xaf, 0x00, 0x0a, 0x55, 0x24,
-	0xed, 0xfd, 0x0c, 0x14, 0x49, 0x28, 0x10, 0xef, 0xc7, 0x91, 0xa4, 0x97, 0x4c, 0x24, 0x06, 0x47,
-	0xea, 0x8f, 0x15, 0xd0, 0x96, 0x31, 0x0d, 0xec, 0x2e, 0xb9, 0xec, 0x07, 0xe9, 0xcc, 0x7a, 0xc3,
-	0x79, 0x7f, 0x16, 0x6a, 0x32, 0x75, 0x4d, 0x82, 0xe9, 0xee, 0x0f, 0x74, 0x55, 0x42, 0x57, 0x31,
-	0x8d, 0x2b, 0xa6, 0x90, 0x7c, 0x2f, 0xae, 0xc3, 0xcc, 0x8e, 0x27, 0x11, 0x04, 0xcd, 0x42, 0xc9,
-	0x65, 0x10, 0xc1, 0x50, 0x33, 0x7e, 0x61, 0xb9, 0xa9, 0x21, 0xf4, 0xfa, 0x6d, 0x38, 0xb9, 0x83,
-	0xb3, 0x4c, 0x85, 0xec, 0xdf, 0x65, 0x0b, 0x0e, 0x0b, 0x97, 0xcb, 0x98, 0x5a, 0xe1, 0x35, 0xca,
-	0x82, 0x59, 0x81, 0xa9, 0x6d, 0x1a, 0xe1, 0xfe, 0x43, 0x28, 0xbb, 0x42, 0x26, 0x36, 0x68, 0x65,
-	0x37, 0x88, 0x6c, 0x22, 0xa4, 0xfe, 0x8f, 0x02, 0x07, 0x33, 0x3d, 0x29, 0xbc, 0x98, 0x7b, 0x81,
-	0xef, 0x9a, 0x72, 0x26, 0x8b, 0x73, 0xb0, 0x11, 0xca, 0x97, 0x84, 0x78, 0xa9, 0x97, 0x4c, 0xd2,
-	0xb1, 0x54, 0x92, 0x7a, 0x50, 0x62, 0xa5, 0x2f, 0x9b, 0xe9, 0x44, 0x1c, 0x0a, 0xa3, 0xe8, 0x96,
-	0x65, 0x07, 0x0b, 0xf3, 0x61, 0x7f, 0xfa, 0xf3, 0xd9, 0xcc, 0x2b, 0x8d, 0x73, 0xdc, 0x7e, 0xbe,
-	0x67, 0xf5, 0x29, 0x0e, 0x0c, 0xb1, 0x0b, 0x7a, 0x0f, 0x4a, 0xbc, 0x85, 0xb6, 0x0a, 0x6c, 0xbf,
-	0xba, 0xcc, 0x8d, 0x64, 0x97, 0x15, 0x10, 0xfd, 0x47, 0x05, 0x8a, 0xfc, 0xa4, 0x6f, 0x2a, 0x61,
-	0x55, 0x28, 0x63, 0xaf, 0xeb, 0xf7, 0x6c, 0x6f, 0x83, 0xbd, 0x38, 0x45, 0x23, 0x5a, 0x23, 0x24,
-	0xea, 0x37, 0xcc, 0xc8, 0x9a, 0x28, 0xd2, 0x79, 0xa8, 0xa7, 0x32, 0x27, 0x35, 0x31, 0x29, 0xfb,
-	0x9a, 0x98, 0x4c, 0xa8, 0x25, 0x35, 0xe8, 0x24, 0x14, 0xe8, 0xa3, 0x3e, 0x7f, 0x3a, 0x1b, 0x9d,
-	0x71, 0x69, 0xcd, 0xd4, 0x6b, 0x8f, 0xfa, 0xd8, 0x60, 0xea, 0x30, 0x1a, 0xd6, 0xf4, 0xf9, 0xf5,
-	0xb1, 0xef, 0xb0, 0x68, 0x58, 0xc7, 0x63, 0xa1, 0x57, 0x0c, 0xbe, 0xd0, 0x7f, 0x50, 0xa0, 0x11,
-	0x67, 0xca, 0x65, 0xdb, 0xc1, 0xaf, 0x23, 0x51, 0x54, 0x28, 0xdf, 0xb3, 0x1d, 0xcc, 0x62, 0xe0,
-	0xdb, 0x45, 0xeb, 0x51, 0x4c, 0xbd, 0x7b, 0x0d, 0x2a, 0xd1, 0x11, 0x50, 0x05, 0x8a, 0x8b, 0xb7,
-	0xef, 0xcc, 0xdf, 0x68, 0xe6, 0x50, 0x1d, 0x2a, 0x37, 0x57, 0xd6, 0x4c, 0xbe, 0x54, 0xd0, 0x41,
-	0xa8, 0x1a, 0x8b, 0x57, 0x16, 0xbf, 0x30, 0x97, 0xe7, 0xd7, 0x2e, 0x5e, 0x6d, 0x8e, 0x21, 0x04,
-	0x0d, 0x2e, 0xb8, 0xb9, 0x22, 0x64, 0xf9, 0xce, 0xaf, 0x65, 0x28, 0xcb, 0x18, 0xd1, 0x39, 0x28,
-	0xdc, 0x1a, 0x90, 0x4d, 0x74, 0x38, 0xce, 0xd4, 0xcf, 0x03, 0x9b, 0x62, 0x51, 0x79, 0xea, 0xd4,
-	0x36, 0x39, 0xaf, 0x3b, 0x3d, 0x87, 0x2e, 0x40, 0x29, 0x34, 0xbd, 0xdb, 0x41, 0x71, 0xc5, 0x6d,
-	0x75, 0xd2, 0xe6, 0xd3, 0x23, 0x34, 0x91, 0x83, 0x4b, 0x50, 0x4d, 0x4c, 0x92, 0x68, 0xe4, 0x8f,
-	0x08, 0xf5, 0x48, 0x4a, 0x9a, 0x7e, 0x5b, 0xf4, 0xdc, 0x69, 0x05, 0xad, 0x40, 0x83, 0xa9, 0xe4,
-	0xd8, 0x48, 0xd0, 0x5b, 0xd2, 0x64, 0xd4, 0x28, 0xad, 0x1e, 0xdd, 0x41, 0x1b, 0x85, 0x75, 0x15,
-	0xaa, 0x89, 0xe1, 0x08, 0xa9, 0xa9, 0x0c, 0x4c, 0x4d, 0x90, 0x71, 0x70, 0x23, 0xe6, 0x30, 0x3d,
-	0x87, 0xee, 0x8a, 0x29, 0x29, 0x39, 0x66, 0xed, 0xea, 0xef, 0xf8, 0x08, 0xdd, 0x88, 0x23, 0x2f,
-	0x02, 0xc4, 0x03, 0x09, 0x9a, 0x4e, 0x19, 0x25, 0x27, 0x32, 0x55, 0x1d, 0xa5, 0x8a, 0xc2, 0x5b,
-	0x85, 0x66, 0x76, 0xae, 0xd9, 0xcd, 0xd9, 0xb1, 0xed, 0xaa, 0x11, 0xb1, 0x2d, 0x40, 0x25, 0xea,
-	0xc9, 0x51, 0x62, 0xb4, 0xb3, 0x23, 0x90, 0xba, 0x73, 0x03, 0xd7, 0x73, 0xe8, 0x32, 0xd4, 0xe6,
-	0x1d, 0x67, 0x3f, 0x6e, 0xd4, 0xa4, 0x86, 0x64, 0xfd, 0x38, 0x51, 0xdb, 0xc8, 0xf6, 0x28, 0xf4,
-	0x76, 0xf4, 0x32, 0xec, 0xda, 0xdb, 0xd5, 0x77, 0xf6, 0xc4, 0x45, 0xbb, 0x7d, 0x0b, 0x47, 0x77,
-	0xed, 0x88, 0xfb, 0xde, 0xf3, 0xd4, 0x1e, 0xb8, 0x11, 0xac, 0xaf, 0xc1, 0xc1, 0x4c, 0x83, 0x44,
-	0x5a, 0xc6, 0x4b, 0xa6, 0xa7, 0xaa, 0x33, 0x3b, 0xea, 0xa5, 0xdf, 0x85, 0x4f, 0x9f, 0x3c, 0xd7,
-	0x72, 0x4f, 0x9f, 0x6b, 0xb9, 0x97, 0xcf, 0x35, 0xe5, 0xbb, 0xa1, 0xa6, 0xfc, 0x36, 0xd4, 0x94,
-	0xc7, 0x43, 0x4d, 0x79, 0x32, 0xd4, 0x94, 0xbf, 0x86, 0x9a, 0xf2, 0xf7, 0x50, 0xcb, 0xbd, 0x1c,
-	0x6a, 0xca, 0x4f, 0x2f, 0xb4, 0xdc, 0x93, 0x17, 0x5a, 0xee, 0xe9, 0x0b, 0x2d, 0xf7, 0x65, 0xa9,
-	0xeb, 0xd8, 0xd8, 0xa3, 0xeb, 0x25, 0xf6, 0xdf, 0xc1, 0x07, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
-	0x63, 0x80, 0x72, 0xdc, 0xe5, 0x10, 0x00, 0x00,
+	// 1361 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0xd4, 0xc6,
+	0x13, 0x97, 0xbc, 0x0f, 0x76, 0x7b, 0x1f, 0xac, 0xc7, 0x06, 0xaf, 0xc5, 0x1f, 0x19, 0x44, 0xf1,
+	0x8f, 0x2b, 0x09, 0x6b, 0xd8, 0x24, 0x55, 0x90, 0x17, 0x65, 0x83, 0x01, 0x03, 0xc6, 0x20, 0x1b,
+	0x27, 0x95, 0x4a, 0x4a, 0x25, 0xef, 0x0e, 0xb6, 0x82, 0x1e, 0x8b, 0x34, 0x4b, 0x41, 0x4e, 0x49,
+	0xe5, 0x03, 0x24, 0xc7, 0x5c, 0x73, 0x4a, 0x3e, 0x40, 0x3e, 0x04, 0x47, 0x0e, 0x39, 0x50, 0x39,
+	0x50, 0x61, 0xb9, 0xe4, 0x48, 0xbe, 0x41, 0x4a, 0xf3, 0xd0, 0x4a, 0xb2, 0xd6, 0x5e, 0x52, 0x38,
+	0x37, 0x4d, 0xf7, 0xaf, 0x7b, 0xba, 0x7f, 0xd3, 0x33, 0xdd, 0xbb, 0x50, 0xb7, 0xdc, 0x6d, 0x1c,
+	0x10, 0xec, 0xb7, 0x7a, 0xbe, 0x47, 0x3c, 0x54, 0xec, 0x78, 0x3e, 0xc1, 0x8f, 0x94, 0xe9, 0x6d,
+	0x6f, 0xdb, 0xa3, 0xa2, 0x85, 0xf0, 0x8b, 0x69, 0x95, 0x0b, 0xdb, 0x16, 0xd9, 0xe9, 0x6f, 0xb5,
+	0x3a, 0x9e, 0xb3, 0xc0, 0x80, 0x3d, 0xdf, 0xfb, 0x1a, 0x77, 0x08, 0x5f, 0x2d, 0xf4, 0xee, 0x6f,
+	0x0b, 0xc5, 0x16, 0xff, 0x60, 0xa6, 0xda, 0x27, 0x50, 0xd1, 0xb1, 0xd9, 0xd5, 0xf1, 0x83, 0x3e,
+	0x0e, 0x08, 0x6a, 0xc1, 0xa1, 0x07, 0x7d, 0xec, 0x5b, 0x38, 0x68, 0xca, 0x27, 0x72, 0xf3, 0x95,
+	0xf6, 0x74, 0x8b, 0xc3, 0xef, 0xf4, 0xb1, 0xff, 0x98, 0xc3, 0x74, 0x01, 0xd2, 0x2e, 0x42, 0x95,
+	0x99, 0x07, 0x3d, 0xcf, 0x0d, 0x30, 0x5a, 0x80, 0x43, 0x3e, 0x0e, 0xfa, 0x36, 0x11, 0xf6, 0x47,
+	0x52, 0xf6, 0x0c, 0xa7, 0x0b, 0x94, 0x76, 0x03, 0x6a, 0x09, 0x0d, 0xfa, 0x10, 0x80, 0x58, 0x0e,
+	0x0e, 0xb2, 0x82, 0xe8, 0x6d, 0xb5, 0x36, 0x2c, 0x07, 0xaf, 0x53, 0xdd, 0x52, 0xfe, 0xc9, 0xf3,
+	0x39, 0x49, 0x8f, 0xa1, 0xb5, 0x9f, 0x64, 0xa8, 0xc6, 0xe3, 0x44, 0xef, 0x02, 0x0a, 0x88, 0xe9,
+	0x13, 0x83, 0x82, 0x88, 0xe9, 0xf4, 0x0c, 0x27, 0x74, 0x2a, 0xcf, 0xe7, 0xf4, 0x06, 0xd5, 0x6c,
+	0x08, 0xc5, 0x6a, 0x80, 0xe6, 0xa1, 0x81, 0xdd, 0x6e, 0x12, 0x3b, 0x41, 0xb1, 0x75, 0xec, 0x76,
+	0xe3, 0xc8, 0xb3, 0x50, 0x72, 0x4c, 0xd2, 0xd9, 0xc1, 0x7e, 0xd0, 0xcc, 0x25, 0x79, 0xba, 0x69,
+	0x6e, 0x61, 0x7b, 0x95, 0x29, 0xf5, 0x08, 0xa5, 0xfd, 0x2c, 0xc3, 0xf4, 0xf2, 0x23, 0xec, 0xf4,
+	0x6c, 0xd3, 0xff, 0x4f, 0x42, 0x3c, 0xb7, 0x2b, 0xc4, 0x23, 0x59, 0x21, 0x06, 0xb1, 0x18, 0xbf,
+	0x84, 0x29, 0x1a, 0xda, 0x3a, 0xf1, 0xb1, 0xe9, 0x44, 0x27, 0x72, 0x11, 0x2a, 0x9d, 0x9d, 0xbe,
+	0x7b, 0x3f, 0x71, 0x24, 0x33, 0xc2, 0xd9, 0xf0, 0x40, 0x2e, 0x85, 0x20, 0x7e, 0x2a, 0x71, 0x8b,
+	0xeb, 0xf9, 0xd2, 0x44, 0x23, 0xa7, 0xad, 0xc3, 0x91, 0x14, 0x01, 0x6f, 0xe0, 0xc4, 0x7f, 0x97,
+	0x01, 0xd1, 0x74, 0x36, 0x4d, 0xbb, 0x8f, 0x03, 0x41, 0xea, 0x71, 0x00, 0x3b, 0x94, 0x1a, 0xae,
+	0xe9, 0x60, 0x4a, 0x66, 0x59, 0x2f, 0x53, 0xc9, 0x2d, 0xd3, 0xc1, 0x23, 0x38, 0x9f, 0x78, 0x0d,
+	0xce, 0x73, 0xfb, 0x72, 0x9e, 0x3f, 0x21, 0x8f, 0xc1, 0x39, 0x9a, 0x86, 0x82, 0x6d, 0x39, 0x16,
+	0x69, 0x16, 0xa8, 0x47, 0xb6, 0xd0, 0xce, 0xc3, 0x54, 0x22, 0x2b, 0xce, 0xd4, 0x49, 0xa8, 0xb2,
+	0xb4, 0x1e, 0x52, 0x39, 0xe5, 0xaa, 0xac, 0x57, 0xec, 0x21, 0x54, 0xfb, 0x14, 0x66, 0x63, 0x96,
+	0xa9, 0x93, 0x1c, 0xc3, 0xfe, 0x37, 0x19, 0x26, 0x6f, 0x0a, 0xa2, 0x82, 0x83, 0x2e, 0xd2, 0x28,
+	0xfb, 0x5c, 0x2c, 0xfb, 0x7f, 0x41, 0xa3, 0xf6, 0x01, 0x2f, 0x03, 0x1e, 0x35, 0xcf, 0x77, 0x0e,
+	0x2a, 0xc3, 0x32, 0x10, 0xe9, 0x42, 0x54, 0x07, 0x81, 0xf6, 0x11, 0x34, 0x87, 0x66, 0x29, 0xb2,
+	0xf6, 0x35, 0x46, 0xd0, 0xb8, 0x1b, 0x60, 0x7f, 0x9d, 0x98, 0x44, 0x10, 0xa5, 0x7d, 0x37, 0x01,
+	0x93, 0x31, 0x21, 0x77, 0x75, 0x5a, 0xbc, 0xe7, 0x96, 0xe7, 0x1a, 0xbe, 0x49, 0x58, 0x49, 0xca,
+	0x7a, 0x2d, 0x92, 0xea, 0x26, 0xc1, 0x61, 0xd5, 0xba, 0x7d, 0xc7, 0xe0, 0x17, 0x21, 0x64, 0x2c,
+	0xaf, 0x97, 0xdd, 0xbe, 0xc3, 0xaa, 0x3f, 0x3c, 0x04, 0xb3, 0x67, 0x19, 0x29, 0x4f, 0x39, 0xea,
+	0xa9, 0x61, 0xf6, 0xac, 0x95, 0x84, 0xb3, 0x16, 0x4c, 0xf9, 0x7d, 0x1b, 0xa7, 0xe1, 0x79, 0x0a,
+	0x9f, 0x0c, 0x55, 0x49, 0xfc, 0x29, 0xa8, 0x99, 0x1d, 0x62, 0x3d, 0xc4, 0x62, 0xff, 0x02, 0xdd,
+	0xbf, 0xca, 0x84, 0x3c, 0x84, 0x53, 0x50, 0xb3, 0x3d, 0xb3, 0x8b, 0xbb, 0xc6, 0x96, 0xed, 0x75,
+	0xee, 0x07, 0xcd, 0x22, 0x03, 0x31, 0xe1, 0x12, 0x95, 0x69, 0x5f, 0xc1, 0x54, 0x48, 0xc1, 0xca,
+	0xe5, 0x24, 0x09, 0x33, 0x70, 0xa8, 0x1f, 0x60, 0xdf, 0xb0, 0xba, 0xfc, 0x42, 0x16, 0xc3, 0xe5,
+	0x4a, 0x17, 0x9d, 0x81, 0x7c, 0xd7, 0x24, 0x26, 0x4d, 0xb8, 0xd2, 0x9e, 0x15, 0x47, 0xbd, 0x8b,
+	0x46, 0x9d, 0xc2, 0xb4, 0xab, 0x80, 0x42, 0x55, 0x90, 0xf4, 0x7e, 0x0e, 0x0a, 0x41, 0x28, 0xe0,
+	0xef, 0xc7, 0xb1, 0xb8, 0x97, 0x54, 0x24, 0x3a, 0x43, 0x6a, 0x4f, 0x64, 0x50, 0x57, 0x31, 0xf1,
+	0xad, 0x4e, 0x70, 0xc5, 0xf3, 0x93, 0x95, 0x75, 0xc0, 0x75, 0x7f, 0x1e, 0xaa, 0xa2, 0x74, 0x8d,
+	0x00, 0x93, 0xbd, 0x1f, 0xe8, 0x8a, 0x80, 0xae, 0x63, 0x32, 0xbc, 0x31, 0xf9, 0xf8, 0x7b, 0x71,
+	0x03, 0xe6, 0x46, 0x66, 0xc2, 0x09, 0x9a, 0x87, 0xa2, 0x43, 0x21, 0x9c, 0xa1, 0xc6, 0xf0, 0x85,
+	0x65, 0xa6, 0x3a, 0xd7, 0x6b, 0x77, 0xe0, 0xf4, 0x08, 0x67, 0xa9, 0x1b, 0x32, 0xbe, 0xcb, 0x26,
+	0x1c, 0xe5, 0x2e, 0x57, 0x31, 0x31, 0xc3, 0x63, 0x14, 0x17, 0x66, 0x0d, 0x66, 0x76, 0x69, 0xb8,
+	0xfb, 0xf7, 0xa1, 0xe4, 0x70, 0x19, 0xdf, 0xa0, 0x99, 0xde, 0x20, 0xb2, 0x89, 0x90, 0xda, 0xdf,
+	0x32, 0x1c, 0x4e, 0xf5, 0xa4, 0xf0, 0x60, 0xee, 0xf9, 0x9e, 0x63, 0x88, 0xa1, 0x6a, 0x58, 0x83,
+	0xf5, 0x50, 0xbe, 0xc2, 0xc5, 0x2b, 0xdd, 0x78, 0x91, 0x4e, 0x24, 0x8a, 0xd4, 0x85, 0x22, 0xbd,
+	0xfa, 0xa2, 0x99, 0x4e, 0x0d, 0x43, 0xa1, 0x14, 0xdd, 0x36, 0x2d, 0x7f, 0x69, 0x31, 0xec, 0x4f,
+	0x7f, 0x3c, 0x9f, 0x7b, 0xad, 0x79, 0x8c, 0xd9, 0x2f, 0x76, 0xcd, 0x1e, 0xc1, 0xbe, 0xce, 0x77,
+	0x41, 0xef, 0x40, 0x91, 0xb5, 0xd0, 0x66, 0x9e, 0xee, 0x57, 0x13, 0xb5, 0x11, 0xef, 0xb2, 0x1c,
+	0xa2, 0xfd, 0x20, 0x43, 0x81, 0x65, 0x7a, 0x50, 0x05, 0xab, 0x40, 0x09, 0xbb, 0x1d, 0xaf, 0x6b,
+	0xb9, 0xdb, 0xf4, 0xc5, 0x29, 0xe8, 0xd1, 0x1a, 0x21, 0x7e, 0x7f, 0xc3, 0x8a, 0xac, 0xf2, 0x4b,
+	0xba, 0x08, 0xb5, 0x44, 0xe5, 0x24, 0x26, 0x26, 0x79, 0xac, 0x89, 0xc9, 0x80, 0x6a, 0x5c, 0x83,
+	0x4e, 0x43, 0x9e, 0x3c, 0xee, 0xb1, 0xa7, 0xb3, 0xde, 0x9e, 0x14, 0xd6, 0x54, 0xbd, 0xf1, 0xb8,
+	0x87, 0x75, 0xaa, 0x0e, 0xa3, 0xa1, 0x4d, 0x9f, 0x1d, 0x1f, 0xfd, 0x0e, 0x2f, 0x0d, 0xed, 0x78,
+	0x34, 0xf4, 0xb2, 0xce, 0x16, 0xda, 0xf7, 0x32, 0xd4, 0x87, 0x95, 0x72, 0xc5, 0xb2, 0xf1, 0x9b,
+	0x28, 0x14, 0x05, 0x4a, 0xf7, 0x2c, 0x1b, 0xd3, 0x18, 0xd8, 0x76, 0xd1, 0x3a, 0x8b, 0xa9, 0xb7,
+	0xaf, 0x43, 0x39, 0x4a, 0x01, 0x95, 0xa1, 0xb0, 0x7c, 0xe7, 0xee, 0xe2, 0xcd, 0x86, 0x84, 0x6a,
+	0x50, 0xbe, 0xb5, 0xb6, 0x61, 0xb0, 0xa5, 0x8c, 0x0e, 0x43, 0x45, 0x5f, 0xbe, 0xba, 0xfc, 0xb9,
+	0xb1, 0xba, 0xb8, 0x71, 0xe9, 0x5a, 0x63, 0x02, 0x21, 0xa8, 0x33, 0xc1, 0xad, 0x35, 0x2e, 0xcb,
+	0xb5, 0x7f, 0x29, 0x41, 0x49, 0xc4, 0x88, 0x2e, 0x40, 0xfe, 0x76, 0x3f, 0xd8, 0x41, 0x47, 0x87,
+	0x95, 0xfa, 0x99, 0x6f, 0x11, 0xcc, 0x6f, 0x9e, 0x32, 0xb3, 0x4b, 0xce, 0xee, 0x9d, 0x26, 0xa1,
+	0x8b, 0x50, 0x0c, 0x4d, 0x37, 0xdb, 0xa8, 0x99, 0x6d, 0xbc, 0xd9, 0x56, 0x66, 0x47, 0x98, 0x6f,
+	0xb6, 0x35, 0x09, 0x5d, 0x86, 0x4a, 0x6c, 0x92, 0x44, 0x99, 0x3f, 0x22, 0x94, 0x63, 0x09, 0x69,
+	0xf2, 0x6d, 0xd1, 0xa4, 0xb3, 0x32, 0x5a, 0x83, 0x3a, 0x55, 0x89, 0xb1, 0x31, 0x40, 0xff, 0x13,
+	0x26, 0x59, 0xa3, 0xb4, 0x72, 0x7c, 0x84, 0x36, 0xca, 0xeb, 0x1a, 0x54, 0x62, 0xc3, 0x11, 0x52,
+	0x12, 0x15, 0x98, 0x98, 0x20, 0x87, 0xc1, 0x65, 0xcc, 0x61, 0x9a, 0x84, 0x36, 0xf9, 0x94, 0x14,
+	0x1f, 0xb3, 0xf6, 0xf4, 0x77, 0x32, 0x43, 0x97, 0x91, 0xf2, 0x32, 0xc0, 0x70, 0x20, 0x41, 0xb3,
+	0x09, 0xa3, 0xf8, 0x44, 0xa6, 0x28, 0x59, 0xaa, 0x28, 0xbc, 0x75, 0x68, 0xa4, 0xe7, 0x9a, 0xbd,
+	0x9c, 0x9d, 0xd8, 0xad, 0xca, 0x88, 0x6d, 0x09, 0xca, 0x51, 0x4f, 0x8e, 0x0a, 0xa3, 0x95, 0x1e,
+	0x81, 0x94, 0xd1, 0x0d, 0x5c, 0x93, 0xd0, 0x15, 0xa8, 0x2e, 0xda, 0xf6, 0x38, 0x6e, 0x94, 0xb8,
+	0x26, 0x48, 0xfb, 0xb1, 0xa3, 0xb6, 0x91, 0xee, 0x51, 0xe8, 0xff, 0xd1, 0xcb, 0xb0, 0x67, 0x6f,
+	0x57, 0xde, 0xda, 0x17, 0x17, 0xed, 0xf6, 0x0d, 0x1c, 0xdf, 0xb3, 0x23, 0x8e, 0xbd, 0xe7, 0x99,
+	0x7d, 0x70, 0x19, 0xac, 0x6f, 0xc0, 0xe1, 0x54, 0x83, 0x44, 0x6a, 0xca, 0x4b, 0xaa, 0xa7, 0x2a,
+	0x73, 0x23, 0xf5, 0xc2, 0xef, 0xd2, 0xc7, 0x4f, 0x5f, 0xa8, 0xd2, 0xb3, 0x17, 0xaa, 0xf4, 0xea,
+	0x85, 0x2a, 0x7f, 0x3b, 0x50, 0xe5, 0x5f, 0x07, 0xaa, 0xfc, 0x64, 0xa0, 0xca, 0x4f, 0x07, 0xaa,
+	0xfc, 0xe7, 0x40, 0x95, 0xff, 0x1a, 0xa8, 0xd2, 0xab, 0x81, 0x2a, 0xff, 0xf8, 0x52, 0x95, 0x9e,
+	0xbe, 0x54, 0xa5, 0x67, 0x2f, 0x55, 0xe9, 0x8b, 0x62, 0xc7, 0xb6, 0xb0, 0x4b, 0xb6, 0x8a, 0xf4,
+	0xbf, 0x83, 0xf7, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x64, 0x59, 0xb6, 0x88, 0xa6, 0x10, 0x00,
+	0x00,
 }
 
 func (x MatchType) String() string {
@@ -2782,7 +2781,7 @@ const _ = grpc.SupportPackageIsVersion4
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type IngesterClient interface {
 	Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
-	PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error)
+	PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error)
 	QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error)
 	QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error)
 	LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error)
@@ -2813,8 +2812,8 @@ func (c *ingesterClient) Push(ctx context.Context, in *cortexpb.WriteRequest, op
 	return out, nil
 }
 
-func (c *ingesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) {
-	out := new(cortexpbv2.WriteResponse)
+func (c *ingesterClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) {
+	out := new(cortexpb.WriteResponseV2)
 	err := c.cc.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...)
 	if err != nil {
 		return nil, err
@@ -3016,7 +3015,7 @@ func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadat
 // IngesterServer is the server API for Ingester service.
 type IngesterServer interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
-	PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+	PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error)
 	QueryStream(*QueryRequest, Ingester_QueryStreamServer) error
 	QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error)
 	LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error)
@@ -3037,7 +3036,7 @@ type UnimplementedIngesterServer struct {
 func (*UnimplementedIngesterServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method Push not implemented")
 }
-func (*UnimplementedIngesterServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (*UnimplementedIngesterServer) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented")
 }
 func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error {
@@ -3097,7 +3096,7 @@ func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(inter
 }
 
 func _Ingester_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(cortexpbv2.WriteRequest)
+	in := new(cortexpb.WriteRequestV2)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
@@ -3109,7 +3108,7 @@ func _Ingester_PushV2_Handler(srv interface{}, ctx context.Context, dec func(int
 		FullMethod: "/cortex.Ingester/PushV2",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(IngesterServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest))
+		return srv.(IngesterServer).PushV2(ctx, req.(*cortexpb.WriteRequestV2))
 	}
 	return interceptor(ctx, in, info, handler)
 }
diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto
index 01f7be6f83..eee2e82b82 100644
--- a/pkg/ingester/client/ingester.proto
+++ b/pkg/ingester/client/ingester.proto
@@ -7,14 +7,13 @@ option go_package = "client";
 
 import "gogoproto/gogo.proto";
 import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto";
-import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 service Ingester {
   rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
-  rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {};
+  rpc PushV2(cortexpb.WriteRequestV2) returns (cortexpb.WriteResponseV2) {};
   
   rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {};
   rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {};
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index e8891c6a59..e17bfd4b3a 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -44,7 +44,6 @@ import (
 
 	"github.com/cortexproject/cortex/pkg/chunk/encoding"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
 	"github.com/cortexproject/cortex/pkg/querysharding"
 	"github.com/cortexproject/cortex/pkg/ring"
@@ -1057,7 +1056,7 @@ type extendedAppender interface {
 	storage.GetRef
 }
 
-func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (i *Ingester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	if err := i.checkRunning(); err != nil {
 		return nil, err
 	}
@@ -1080,7 +1079,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 
 	// NOTE: because we use `unsafe` in deserialisation, we must not
 	// retain anything from `req` past the call to ReuseSlice
-	defer cortexpbv2.ReuseSlice(req.Timeseries)
+	defer cortexpb.ReuseSliceV2(req.Timeseries)
 
 	userID, err := tenant.TenantID(ctx)
 	if err != nil {
@@ -1108,7 +1107,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 	i.stoppedMtx.RUnlock()
 
 	if err := db.acquireAppendLock(); err != nil {
-		return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error())
+		return &cortexpb.WriteResponseV2{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error())
 	}
 	defer db.releaseAppendLock()
 
@@ -1220,7 +1219,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 
 			// If the cached reference exists, we try to use it.
 			if ref != 0 {
-				if _, err = app.Append(ref, copiedLabels, s.Timestamp, s.Value); err == nil {
+				if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil {
 					succeededSamplesCount++
 					continue
 				}
@@ -1230,7 +1229,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 				copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels)
 
 				// Retain the reference in case there are multiple samples for the series.
-				if ref, err = app.Append(0, copiedLabels, s.Timestamp, s.Value); err == nil {
+				if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil {
 					succeededSamplesCount++
 					continue
 				}
@@ -1238,7 +1237,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 
 			failedSamplesCount++
 
-			if rollback := handleAppendFailure(err, s.Timestamp, seriesLabels, copiedLabels); !rollback {
+			if rollback := handleAppendFailure(err, s.TimestampMs, seriesLabels, copiedLabels); !rollback {
 				continue
 			}
 			// The error looks an issue on our side, so we should rollback
@@ -1258,13 +1257,13 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 				)
 
 				if hp.GetCountFloat() > 0 {
-					fh = cortexpbv2.FloatHistogramProtoToFloatHistogram(hp)
+					fh = cortexpb.FloatHistogramProtoToFloatHistogram(hp)
 				} else {
-					h = cortexpbv2.HistogramProtoToHistogram(hp)
+					h = cortexpb.HistogramProtoToHistogram(hp)
 				}
 
 				if ref != 0 {
-					if _, err = app.AppendHistogram(ref, copiedLabels, hp.Timestamp, h, fh); err == nil {
+					if _, err = app.AppendHistogram(ref, copiedLabels, hp.TimestampMs, h, fh); err == nil {
 						succeededHistogramCount++
 						succeededSamplesCount++
 						continue
@@ -1272,7 +1271,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 				} else {
 					// Copy the label set because both TSDB and the active series tracker may retain it.
 					copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels)
-					if ref, err = app.AppendHistogram(0, copiedLabels, hp.Timestamp, h, fh); err == nil {
+					if ref, err = app.AppendHistogram(0, copiedLabels, hp.TimestampMs, h, fh); err == nil {
 						succeededHistogramCount++
 						succeededSamplesCount++
 						continue
@@ -1281,7 +1280,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 
 				failedSamplesCount++
 
-				if rollback := handleAppendFailure(err, hp.Timestamp, seriesLabels, copiedLabels); !rollback {
+				if rollback := handleAppendFailure(err, hp.TimestampMs, seriesLabels, copiedLabels); !rollback {
 					continue
 				}
 				// The error looks an issue on our side, so we should rollback
@@ -1335,7 +1334,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 			}
 		}
 
-		if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED {
+		if ts.Metadata.Type != cortexpb.UNKNOWN {
 			metaData := ts.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols)
 			if err := i.appendMetadata(userID, metaData); err == nil {
 				succeededMetadataCount++
@@ -1399,9 +1398,9 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 	i.ingestionRate.Add(int64(succeededSamplesCount + succeededMetadataCount))
 
 	switch req.Source {
-	case cortexpbv2.RULE:
+	case cortexpb.RULE:
 		db.ingestedRuleSamples.Add(int64(succeededSamplesCount))
-	case cortexpbv2.API:
+	case cortexpb.API:
 		fallthrough
 	default:
 		db.ingestedAPISamples.Add(int64(succeededSamplesCount))
@@ -1414,10 +1413,10 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c
 			code = ve.code
 		}
 		level.Debug(logutil.WithContext(ctx, i.logger)).Log("msg", "partial failures to push", "totalSamples", succeededSamplesCount+failedSamplesCount, "failedSamples", failedSamplesCount, "firstPartialErr", firstPartialErr)
-		return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error())
+		return &cortexpb.WriteResponseV2{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error())
 	}
 
-	writeResponse := &cortexpbv2.WriteResponse{
+	writeResponse := &cortexpb.WriteResponseV2{
 		Samples:    int64(succeededSamplesCount),
 		Histograms: int64(succeededHistogramCount),
 		Exemplars:  int64(succeededExemplarsCount),
diff --git a/pkg/ingester/ingester_prw2_test.go b/pkg/ingester/ingester_prw2_test.go
index ac132527fb..cb1c51360c 100644
--- a/pkg/ingester/ingester_prw2_test.go
+++ b/pkg/ingester/ingester_prw2_test.go
@@ -42,10 +42,8 @@ import (
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 
-	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/chunk/encoding"
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/ingester/client"
 	"github.com/cortexproject/cortex/pkg/ring"
 	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
@@ -68,12 +66,10 @@ func TestIngesterPRW2_Push(t *testing.T) {
 	}
 	userID := "test"
 
-	testHistogramV2 := cortexpbv2.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1))
 	testHistogram := cortexpb.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1))
-	testFloatHistogramV2 := cortexpbv2.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1))
 	testFloatHistogram := cortexpb.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1))
 	tests := map[string]struct {
-		reqs                      []*cortexpbv2.WriteRequest
+		reqs                      []*cortexpb.WriteRequestV2
 		expectedErr               error
 		expectedIngested          []cortexpb.TimeSeries
 		expectedMetadataIngested  []*cortexpb.MetricMetadata
@@ -86,13 +82,13 @@ func TestIngesterPRW2_Push(t *testing.T) {
 		disableNativeHistogram    bool
 	}{
 		"should record native histogram discarded": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
-					[]cortexpbv2.Histogram{{Timestamp: 10}},
-					[]cortexpbv2.Metadata{{Type: cortexpbv2.METRIC_TYPE_GAUGE, HelpRef: 3}},
-					cortexpbv2.API,
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
+					[]cortexpb.Histogram{{TimestampMs: 10}},
+					[]cortexpb.MetadataV2{{Type: cortexpb.GAUGE, HelpRef: 3}},
+					cortexpb.API,
 					"a help for metric_name_2"),
 			},
 			expectedErr: nil,
@@ -132,20 +128,20 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"should succeed on valid series and metadata": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 					nil,
-					[]cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_COUNTER}},
-					cortexpbv2.API,
+					[]cortexpb.MetadataV2{{HelpRef: 3, Type: cortexpb.COUNTER}},
+					cortexpb.API,
 					"a help for metric_name_1"),
-				cortexpbv2.ToWriteRequestV2(
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
 					nil,
-					[]cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_GAUGE}},
-					cortexpbv2.API,
+					[]cortexpb.MetadataV2{{HelpRef: 3, Type: cortexpb.GAUGE}},
+					cortexpb.API,
 					"a help for metric_name_2"),
 			},
 			expectedErr: nil,
@@ -202,21 +198,21 @@ func TestIngesterPRW2_Push(t *testing.T) {
 		},
 		"should succeed on valid series with exemplars": {
 			maxExemplars: 2,
-			reqs: []*cortexpbv2.WriteRequest{
+			reqs: []*cortexpb.WriteRequestV2{
 				// Ingesting an exemplar requires a sample to create the series first
-				cortexpbv2.ToWriteRequestV2(
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 					nil,
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 				{
 					Symbols: []string{"", "__name__", "test", "traceID", "123", "456"},
-					Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+					Timeseries: []cortexpb.PreallocTimeseriesV2{
 						{
-							TimeSeries: &cortexpbv2.TimeSeries{
+							TimeSeriesV2: &cortexpb.TimeSeriesV2{
 								LabelsRefs: []uint32{1, 2},
-								Exemplars: []cortexpbv2.Exemplar{
+								Exemplars: []cortexpb.ExemplarV2{
 									{
 										LabelsRefs: []uint32{3, 4},
 										Timestamp:  1000,
@@ -309,19 +305,19 @@ func TestIngesterPRW2_Push(t *testing.T) {
 		},
 		"successful push, active series disabled": {
 			disableActiveSeries: true,
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
 					nil,
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: nil,
 			expectedIngested: []cortexpb.TimeSeries{
@@ -349,21 +345,21 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"ooo disabled, should soft fail on sample out of order": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
-					[]cortexpbv2.Histogram{
-						cortexpbv2.HistogramToHistogramProto(9, tsdbutil.GenerateTestHistogram(1)),
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
+					[]cortexpb.Histogram{
+						cortexpb.HistogramToHistogramProto(9, tsdbutil.GenerateTestHistogram(1)),
 					},
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfOrderSample, model.Time(9), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
 			expectedIngested: []cortexpb.TimeSeries{
@@ -410,21 +406,21 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"ooo disabled, should soft fail on sample out of bound": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (86400 * 1000)}},
-					[]cortexpbv2.Histogram{
-						cortexpbv2.HistogramToHistogramProto(1575043969-(86400*1000), tsdbutil.GenerateTestHistogram(1)),
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (86400 * 1000)}},
+					[]cortexpb.Histogram{
+						cortexpb.HistogramToHistogramProto(1575043969-(86400*1000), tsdbutil.GenerateTestHistogram(1)),
 					},
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfBounds, model.Time(1575043969-(86400*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
 			expectedIngested: []cortexpb.TimeSeries{
@@ -459,19 +455,19 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"ooo enabled, should soft fail on sample too old": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (600 * 1000)}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (600 * 1000)}},
 					nil,
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			oooTimeWindow: 5 * time.Minute,
 			expectedErr:   httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrTooOldSample, model.Time(1575043969-(600*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
@@ -510,19 +506,19 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"ooo enabled, should succeed": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (60 * 1000)}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}},
 					nil,
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			oooTimeWindow: 5 * time.Minute,
 			expectedIngested: []cortexpb.TimeSeries{
@@ -554,19 +550,19 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"should soft fail on two different sample values at the same timestamp": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}},
 					nil,
 					nil,
-					cortexpbv2.API),
-				cortexpbv2.ToWriteRequestV2(
+					cortexpb.API),
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: 1575043969}},
 					nil,
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.NewDuplicateFloatErr(1575043969, 2, 1), model.Time(1575043969), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()),
 			expectedIngested: []cortexpb.TimeSeries{
@@ -602,16 +598,16 @@ func TestIngesterPRW2_Push(t *testing.T) {
 		},
 		"should soft fail on exemplar with unknown series": {
 			maxExemplars: 1,
-			reqs: []*cortexpbv2.WriteRequest{
+			reqs: []*cortexpb.WriteRequestV2{
 				// Ingesting an exemplar requires a sample to create the series first
 				// This is not done here.
 				{
 					Symbols: []string{"", "__name__", "test", "traceID", "123"},
-					Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+					Timeseries: []cortexpb.PreallocTimeseriesV2{
 						{
-							TimeSeries: &cortexpbv2.TimeSeries{
+							TimeSeriesV2: &cortexpb.TimeSeriesV2{
 								LabelsRefs: []uint32{1, 2},
-								Exemplars: []cortexpbv2.Exemplar{
+								Exemplars: []cortexpb.ExemplarV2{
 									{
 										LabelsRefs: []uint32{3, 4},
 										Timestamp:  1000,
@@ -679,13 +675,13 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"should succeed when only native histogram present if enabled": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
 					nil,
-					[]cortexpbv2.Histogram{testHistogramV2},
+					[]cortexpb.Histogram{testHistogram},
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: nil,
 			expectedIngested: []cortexpb.TimeSeries{
@@ -729,13 +725,13 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"should succeed when only float native histogram present if enabled": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
 					nil,
-					[]cortexpbv2.Histogram{testFloatHistogramV2},
+					[]cortexpb.Histogram{testFloatHistogram},
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: nil,
 			expectedIngested: []cortexpb.TimeSeries{
@@ -779,13 +775,13 @@ func TestIngesterPRW2_Push(t *testing.T) {
 			`,
 		},
 		"should fail to ingest histogram due to OOO native histogram. Sample and histogram has same timestamp but sample got ingested first": {
-			reqs: []*cortexpbv2.WriteRequest{
-				cortexpbv2.ToWriteRequestV2(
+			reqs: []*cortexpb.WriteRequestV2{
+				cortexpb.ToWriteRequestV2(
 					[]labels.Labels{metricLabels},
-					[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
-					[]cortexpbv2.Histogram{testHistogramV2},
+					[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
+					[]cortexpb.Histogram{testHistogram},
 					nil,
-					cortexpbv2.API),
+					cortexpb.API),
 			},
 			expectedErr: nil,
 			expectedIngested: []cortexpb.TimeSeries{
@@ -953,35 +949,35 @@ func TestIngesterPRW2_MetricLimitExceeded(t *testing.T) {
 
 			userID := "1"
 			labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}
-			sample1 := cortexpbv2.Sample{
-				Timestamp: 0,
-				Value:     1,
+			sample1 := cortexpb.Sample{
+				TimestampMs: 0,
+				Value:       1,
 			}
-			sample2 := cortexpbv2.Sample{
-				Timestamp: 1,
-				Value:     2,
+			sample2 := cortexpb.Sample{
+				TimestampMs: 1,
+				Value:       2,
 			}
 			labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}}
-			sample3 := cortexpbv2.Sample{
-				Timestamp: 1,
-				Value:     3,
+			sample3 := cortexpb.Sample{
+				TimestampMs: 1,
+				Value:       3,
 			}
 
 			// Append only one series and one metadata first, expect no error.
 			ctx := user.InjectOrgID(context.Background(), userID)
-			_, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric"))
+			_, err := ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpb.Sample{sample1}, nil, []cortexpb.MetadataV2{{HelpRef: 5, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric"))
 			require.NoError(t, err)
 
 			testLimits := func() {
 				// Append two series, expect series-exceeded error.
-				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API))
+				_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpb.Sample{sample2, sample3}, nil, nil, cortexpb.API))
 				httpResp, ok := httpgrpc.HTTPResponseFromError(err)
 				require.True(t, ok, "returned error is not an httpgrpc response")
 				assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
 				assert.Equal(t, wrapWithUser(makeMetricLimitError(perMetricSeriesLimit, labels3, ing.limiter.FormatError(userID, errMaxSeriesPerMetricLimitExceeded)), userID).Error(), string(httpResp.Body))
 
 				// Append two metadata for the same metric. Drop the second one, and expect no error since metadata is a best effort approach.
-				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 6, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2"))
+				_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpb.MetadataV2{{HelpRef: 6, Type: cortexpb.COUNTER}, {HelpRef: 7, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric", "a help for testmetric2"))
 				require.NoError(t, err)
 
 				// Read samples back via ingester queries.
@@ -994,11 +990,11 @@ func TestIngesterPRW2_MetricLimitExceeded(t *testing.T) {
 						Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)),
 						Values: []model.SamplePair{
 							{
-								Timestamp: model.Time(sample1.Timestamp),
+								Timestamp: model.Time(sample1.TimestampMs),
 								Value:     model.SampleValue(sample1.Value),
 							},
 							{
-								Timestamp: model.Time(sample2.Timestamp),
+								Timestamp: model.Time(sample2.TimestampMs),
 								Value:     model.SampleValue(sample2.Value),
 							},
 						},
@@ -1058,35 +1054,35 @@ func TestIngesterPRW2_UserLimitExceeded(t *testing.T) {
 			userID := "1"
 			// Series
 			labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}
-			sample1 := cortexpbv2.Sample{
-				Timestamp: 0,
-				Value:     1,
+			sample1 := cortexpb.Sample{
+				TimestampMs: 0,
+				Value:       1,
 			}
-			sample2 := cortexpbv2.Sample{
-				Timestamp: 1,
-				Value:     2,
+			sample2 := cortexpb.Sample{
+				TimestampMs: 1,
+				Value:       2,
 			}
 			labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric2"}, {Name: "foo", Value: "biz"}}
-			sample3 := cortexpbv2.Sample{
-				Timestamp: 1,
-				Value:     3,
+			sample3 := cortexpb.Sample{
+				TimestampMs: 1,
+				Value:       3,
 			}
 
 			// Append only one series and one metadata first, expect no error.
 			ctx := user.InjectOrgID(context.Background(), userID)
-			_, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric"))
+			_, err := ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpb.Sample{sample1}, nil, []cortexpb.MetadataV2{{HelpRef: 5, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric"))
 			require.NoError(t, err)
 
 			testLimits := func() {
 				// Append to two series, expect series-exceeded error.
-				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API))
+				_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpb.Sample{sample2, sample3}, nil, nil, cortexpb.API))
 				httpResp, ok := httpgrpc.HTTPResponseFromError(err)
 				require.True(t, ok, "returned error is not an httpgrpc response")
 				assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
 				assert.Equal(t, wrapWithUser(makeLimitError(perUserSeriesLimit, ing.limiter.FormatError(userID, errMaxSeriesPerUserLimitExceeded)), userID).Error(), string(httpResp.Body))
 
 				// Append two metadata, expect no error since metadata is a best effort approach.
-				_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 8, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2"))
+				_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpb.MetadataV2{{HelpRef: 7, Type: cortexpb.COUNTER}, {HelpRef: 8, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric", "a help for testmetric2"))
 				require.NoError(t, err)
 
 				// Read samples back via ingester queries.
@@ -1098,11 +1094,11 @@ func TestIngesterPRW2_UserLimitExceeded(t *testing.T) {
 						Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)),
 						Values: []model.SamplePair{
 							{
-								Timestamp: model.Time(sample1.Timestamp),
+								Timestamp: model.Time(sample1.TimestampMs),
 								Value:     model.SampleValue(sample1.Value),
 							},
 							{
-								Timestamp: model.Time(sample2.Timestamp),
+								Timestamp: model.Time(sample2.TimestampMs),
 								Value:     model.SampleValue(sample2.Value),
 							},
 						},
@@ -1176,7 +1172,7 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
 	})
 
 	ctx := user.InjectOrgID(context.Background(), userID)
-	samples := []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}
+	samples := []cortexpb.Sample{{Value: 2, TimestampMs: 10}}
 
 	// Create first series within the limits
 	for _, set := range limits.LimitsPerLabelSet {
@@ -1185,8 +1181,8 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
 			lbls = append(lbls, lbl.Name, lbl.Value)
 		}
 		for i := 0; i < set.Limits.MaxSeries; i++ {
-			_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-				[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+			_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+				[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API))
 			require.NoError(t, err)
 		}
 	}
@@ -1209,8 +1205,8 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
 		for _, lbl := range set.LabelSet {
 			lbls = append(lbls, lbl.Name, lbl.Value)
 		}
-		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-			[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpbv2.API))
+		_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API))
 		httpResp, ok := httpgrpc.HTTPResponseFromError(err)
 		require.True(t, ok, "returned error is not an httpgrpc response")
 		assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
@@ -1288,22 +1284,22 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
 	// Adding 5 metrics with only 1 label
 	for i := 0; i < 5; i++ {
 		lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1"}
-		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+		_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API))
 		require.NoError(t, err)
 	}
 
 	// Adding 2 metrics with both labels (still below the limit)
 	lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1", "comp2", "compValue2"}
 	for i := 0; i < 2; i++ {
-		_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API))
+		_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+			[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API))
 		require.NoError(t, err)
 	}
 
 	// Now we should hit the limit as we already have 2 metrics with comp1=compValue1, comp2=compValue2
-	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-		[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpbv2.API))
+	_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API))
 	httpResp, ok := httpgrpc.HTTPResponseFromError(err)
 	require.True(t, ok, "returned error is not an httpgrpc response")
 	assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
@@ -1348,12 +1344,12 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) {
 	tenantLimits.setLimits(userID, &limits)
 
 	lbls = []string{labels.MetricName, "metric_name", "comp2", "compValue2"}
-	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate")...)}, samples, nil, nil, cortexpbv2.API))
+	_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate")...)}, samples, nil, nil, cortexpb.API))
 	require.NoError(t, err)
 
-	_, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2(
-		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate2")...)}, samples, nil, nil, cortexpbv2.API))
+	_, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2(
+		[]labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate2")...)}, samples, nil, nil, cortexpb.API))
 	httpResp, ok = httpgrpc.HTTPResponseFromError(err)
 	require.True(t, ok, "returned error is not an httpgrpc response")
 	assert.Equal(t, http.StatusBadRequest, int(httpResp.Code))
@@ -1424,13 +1420,13 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
 	for _, tc := range []struct {
 		name        string
-		histograms  []cortexpbv2.Histogram
+		histograms  []cortexpb.Histogram
 		expectedErr error
 	}{
 		{
 			name: "rejects histogram with NaN observations that has its Count (2) lower than the actual total of buckets (2 + 1)",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					ZeroCount:       2,
 					Count:           2,
 					Sum:             math.NaN(),
@@ -1442,8 +1438,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					ZeroCount:       2,
 					Count:           4,
 					Sum:             333,
@@ -1455,8 +1451,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects histogram that has too few negative buckets",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
 					NegativeBuckets: []int64{},
 				}),
@@ -1465,8 +1461,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects histogram that has too few positive buckets",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
 					PositiveBuckets: []int64{},
 				}),
@@ -1475,8 +1471,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects histogram that has too many negative buckets",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					NegativeSpans:   []histogram.Span{{Offset: 0, Length: 1}},
 					NegativeBuckets: []int64{1, 2},
 				}),
@@ -1485,8 +1481,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects histogram that has too many positive buckets",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
 					PositiveBuckets: []int64{1, 2},
 				}),
@@ -1495,8 +1491,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that has a negative span with a negative offset",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
 					NegativeBuckets: []int64{1, 2},
 				}),
@@ -1505,8 +1501,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that has a positive span with a negative offset",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}},
 					PositiveBuckets: []int64{1, 2},
 				}),
@@ -1515,8 +1511,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that has a negative span with a negative count",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
 					NegativeBuckets: []int64{-1},
 				}),
@@ -1525,8 +1521,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that has a positive span with a negative count",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
 					PositiveBuckets: []int64{-1},
 				}),
@@ -1535,8 +1531,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that has a lower count than count in buckets",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					Count:           0,
 					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
 					PositiveSpans:   []histogram.Span{{Offset: -1, Length: 1}},
@@ -1548,8 +1544,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 		},
 		{
 			name: "rejects a histogram that doesn't count the zero bucket in its count",
-			histograms: []cortexpbv2.Histogram{
-				cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{
+			histograms: []cortexpb.Histogram{
+				cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{
 					Count:           2,
 					ZeroCount:       1,
 					NegativeSpans:   []histogram.Span{{Offset: -1, Length: 1}},
@@ -1581,7 +1577,7 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) {
 				return i.lifecycler.GetState()
 			})
 
-			req := cortexpbv2.ToWriteRequestV2([]labels.Labels{metricLabels}, nil, tc.histograms, nil, cortexpbv2.API)
+			req := cortexpb.ToWriteRequestV2([]labels.Labels{metricLabels}, nil, tc.histograms, nil, cortexpb.API)
 			// Push timeseries
 			_, err = i.PushV2(ctx, req)
 			assert.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(tc.expectedErr, model.Time(10), metricLabelAdapters), userID).Error()), err)
@@ -1622,19 +1618,19 @@ func TestIngesterPRW2_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *t
 
 	// Push timeseries for each user
 	for _, userID := range []string{"test-1", "test-2"} {
-		reqs := []*cortexpbv2.WriteRequest{
-			cortexpbv2.ToWriteRequestV2(
+		reqs := []*cortexpb.WriteRequestV2{
+			cortexpb.ToWriteRequestV2(
 				[]labels.Labels{metricLabels},
-				[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+				[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 				nil,
 				nil,
-				cortexpbv2.API),
-			cortexpbv2.ToWriteRequestV2(
+				cortexpb.API),
+			cortexpb.ToWriteRequestV2(
 				[]labels.Labels{metricLabels},
-				[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+				[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
 				nil,
 				nil,
-				cortexpbv2.API),
+				cortexpb.API),
 		}
 
 		for _, req := range reqs {
@@ -1706,19 +1702,19 @@ func TestIngesterPRW2_Push_DecreaseInactiveSeries(t *testing.T) {
 
 	// Push timeseries for each user
 	for _, userID := range []string{"test-1", "test-2"} {
-		reqs := []*cortexpbv2.WriteRequest{
-			cortexpbv2.ToWriteRequestV2(
+		reqs := []*cortexpb.WriteRequestV2{
+			cortexpb.ToWriteRequestV2(
 				[]labels.Labels{metricLabels},
-				[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+				[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 				nil,
 				nil,
-				cortexpbv2.API),
-			cortexpbv2.ToWriteRequestV2(
+				cortexpb.API),
+			cortexpb.ToWriteRequestV2(
 				[]labels.Labels{metricLabels},
-				[]cortexpbv2.Sample{{Value: 2, Timestamp: 10}},
+				[]cortexpb.Sample{{Value: 2, TimestampMs: 10}},
 				nil,
 				nil,
-				cortexpbv2.API),
+				cortexpb.API),
 		}
 
 		for _, req := range reqs {
@@ -1781,12 +1777,12 @@ func benchmarkIngesterPRW2Push(b *testing.B, limits validation.Limits, errorsExp
 	metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters)
 	startTime := util.TimeToMillis(time.Now())
 
-	currTimeReq := cortexpbv2.ToWriteRequestV2(
+	currTimeReq := cortexpb.ToWriteRequestV2(
 		[]labels.Labels{metricLabels},
-		[]cortexpbv2.Sample{{Value: 1, Timestamp: startTime}},
+		[]cortexpb.Sample{{Value: 1, TimestampMs: startTime}},
 		nil,
 		nil,
-		cortexpbv2.API)
+		cortexpb.API)
 	_, err = ingester.PushV2(ctx, currTimeReq)
 	require.NoError(b, err)
 
@@ -1795,16 +1791,16 @@ func benchmarkIngesterPRW2Push(b *testing.B, limits validation.Limits, errorsExp
 		samples = 10
 	)
 
-	allLabels, allSamples := benchmarkDataV2(series)
+	allLabels, allSamples := benchmarkData(series)
 
 	b.ResetTimer()
 	for iter := 0; iter < b.N; iter++ {
 		// Bump the timestamp on each of our test samples each time round the loop
 		for j := 0; j < samples; j++ {
 			for i := range allSamples {
-				allSamples[i].Timestamp = startTime + int64(iter*samples+j+1)
+				allSamples[i].TimestampMs = startTime + int64(iter*samples+j+1)
 			}
-			_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(allLabels, allSamples, nil, nil, cortexpbv2.API))
+			_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(allLabels, allSamples, nil, nil, cortexpb.API))
 			if !errorsExpected {
 				require.NoError(b, err)
 			}
@@ -1846,26 +1842,26 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 		// If this returns false, test is skipped.
 		prepareConfig   func(limits *validation.Limits, instanceLimits *InstanceLimits) bool
 		beforeBenchmark func(b *testing.B, ingester *Ingester, numSeriesPerRequest int)
-		runBenchmark    func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample)
+		runBenchmark    func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample)
 	}{
 		"out of bound samples": {
 			prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true },
 			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
 				// Push a single time series to set the TSDB min time.
-				currTimeReq := cortexpbv2.ToWriteRequestV2(
+				currTimeReq := cortexpb.ToWriteRequestV2(
 					[]labels.Labels{{{Name: labels.MetricName, Value: metricName}}},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: util.TimeToMillis(time.Now())}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: util.TimeToMillis(time.Now())}},
 					nil,
 					nil,
-					cortexpbv2.API)
+					cortexpb.API)
 				_, err := ingester.PushV2(ctx, currTimeReq)
 				require.NoError(b, err)
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				expectedErr := storage.ErrOutOfBounds.Error()
 				// Push out of bound samples.
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck
 
 					verifyErrorString(b, err, expectedErr)
 				}
@@ -1876,18 +1872,18 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
 				// For each series, push a single sample with a timestamp greater than next pushes.
 				for i := 0; i < numSeriesPerRequest; i++ {
-					currTimeReq := cortexpbv2.ToWriteRequestV2(
+					currTimeReq := cortexpb.ToWriteRequestV2(
 						[]labels.Labels{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}},
 						nil,
 						nil,
-						cortexpbv2.API)
+						cortexpb.API)
 
 					_, err := ingester.PushV2(ctx, currTimeReq)
 					require.NoError(b, err)
 				}
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				expectedErr := storage.ErrOutOfOrderSample.Error()
 
 				st := writev2.NewSymbolTable()
@@ -1896,7 +1892,7 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 				}
 				// Push out of order samples.
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck
 
 					verifyErrorString(b, err, expectedErr)
 				}
@@ -1909,16 +1905,16 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 			},
 			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
 				// Push a series with a metric name different than the one used during the benchmark.
-				currTimeReq := cortexpbv2.ToWriteRequestV2(
+				currTimeReq := cortexpb.ToWriteRequestV2(
 					[]labels.Labels{labels.FromStrings(labels.MetricName, "another")},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}},
 					nil,
 					nil,
-					cortexpbv2.API)
+					cortexpb.API)
 				_, err := ingester.PushV2(ctx, currTimeReq)
 				require.NoError(b, err)
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				// Push series with a different name than the one already pushed.
 
 				st := writev2.NewSymbolTable()
@@ -1926,7 +1922,7 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck
 					verifyErrorString(b, err, "per-user series limit")
 				}
 			},
@@ -1938,23 +1934,23 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 			},
 			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
 				// Push a series with the same metric name but different labels than the one used during the benchmark.
-				currTimeReq := cortexpbv2.ToWriteRequestV2(
+				currTimeReq := cortexpb.ToWriteRequestV2(
 					[]labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", "another")},
-					[]cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}},
+					[]cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}},
 					nil,
 					nil,
-					cortexpbv2.API)
+					cortexpb.API)
 				_, err := ingester.PushV2(ctx, currTimeReq)
 				require.NoError(b, err)
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				st := writev2.NewSymbolTable()
 				for _, lbs := range metrics {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				// Push series with different labels than the one already pushed.
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck
 					verifyErrorString(b, err, "per-metric series limit")
 				}
 			},
@@ -1974,14 +1970,14 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 
 				ingester.ingestionRate.Tick()
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				st := writev2.NewSymbolTable()
 				for _, lbs := range metrics {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				// Push series with different labels than the one already pushed.
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API))
 					verifyErrorString(b, err, "push rate reached")
 				}
 			},
@@ -2000,14 +1996,14 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 				_, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000))
 				require.NoError(b, err)
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				st := writev2.NewSymbolTable()
 				for _, lbs := range metrics {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				// Push series with different labels than the one already pushed.
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API))
 					verifyErrorString(b, err, "max tenants limit reached")
 				}
 			},
@@ -2024,13 +2020,13 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 				_, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000))
 				require.NoError(b, err)
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				st := writev2.NewSymbolTable()
 				for _, lbs := range metrics {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API))
 					verifyErrorString(b, err, "max series limit reached")
 				}
 			},
@@ -2046,13 +2042,13 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 			beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) {
 				ingester.inflightPushRequests.Inc()
 			},
-			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) {
+			runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) {
 				st := writev2.NewSymbolTable()
 				for _, lbs := range metrics {
 					st.SymbolizeLabels(lbs, nil)
 				}
 				for n := 0; n < b.N; n++ {
-					_, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API))
+					_, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API))
 					verifyErrorString(b, err, "too many inflight push requests")
 				}
 			},
@@ -2100,10 +2096,10 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) {
 
 					// Prepare the request.
 					metrics := make([]labels.Labels, 0, scenario.numSeriesPerRequest)
-					samples := make([]cortexpbv2.Sample, 0, scenario.numSeriesPerRequest)
+					samples := make([]cortexpb.Sample, 0, scenario.numSeriesPerRequest)
 					for i := 0; i < scenario.numSeriesPerRequest; i++ {
 						metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}})
-						samples = append(samples, cortexpbv2.Sample{Value: float64(i), Timestamp: sampleTimestamp})
+						samples = append(samples, cortexpb.Sample{Value: float64(i), TimestampMs: sampleTimestamp})
 					}
 
 					// Run the benchmark.
@@ -2459,7 +2455,7 @@ func TestIngesterPRW2_Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) {
 	// Mock request
 	userID := "test"
 	ctx := user.InjectOrgID(context.Background(), userID)
-	req := &cortexpbv2.WriteRequest{}
+	req := &cortexpb.WriteRequestV2{}
 
 	res, err := i.PushV2(ctx, req)
 	assert.Equal(t, wrapWithUser(fmt.Errorf(errTSDBCreateIncompatibleState, "PENDING"), userID).Error(), err.Error())
@@ -2739,7 +2735,7 @@ func TestIngesterPRW2_QueryStream(t *testing.T) {
 			ctx := user.InjectOrgID(context.Background(), userID)
 			lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}}
 			var (
-				req                    *cortexpbv2.WriteRequest
+				req                    *cortexpb.WriteRequestV2
 				expectedResponseChunks *client.QueryStreamResponse
 			)
 			switch enc {
@@ -2822,12 +2818,12 @@ func TestIngesterPRW2_QueryStreamManySamplesChunks(t *testing.T) {
 	ctx := user.InjectOrgID(context.Background(), userID)
 
 	const samplesCount = 1000000
-	samples := make([]cortexpbv2.Sample, 0, samplesCount)
+	samples := make([]cortexpb.Sample, 0, samplesCount)
 
 	for i := 0; i < samplesCount; i++ {
-		samples = append(samples, cortexpbv2.Sample{
-			Value:     float64(i),
-			Timestamp: int64(i),
+		samples = append(samples, cortexpb.Sample{
+			Value:       float64(i),
+			TimestampMs: int64(i),
 		})
 	}
 
@@ -2928,12 +2924,12 @@ func benchmarkQueryStreamV2(b *testing.B) {
 	ctx := user.InjectOrgID(context.Background(), userID)
 
 	const samplesCount = 1000
-	samples := make([]cortexpbv2.Sample, 0, samplesCount)
+	samples := make([]cortexpb.Sample, 0, samplesCount)
 
 	for i := 0; i < samplesCount; i++ {
-		samples = append(samples, cortexpbv2.Sample{
-			Value:     float64(i),
-			Timestamp: int64(i),
+		samples = append(samples, cortexpb.Sample{
+			Value:       float64(i),
+			TimestampMs: int64(i),
 		})
 	}
 
@@ -3995,22 +3991,22 @@ func TestIngesterPRW2_NoFlushWithInFlightRequest(t *testing.T) {
 func TestIngesterPRW2_PushInstanceLimits(t *testing.T) {
 	tests := map[string]struct {
 		limits          InstanceLimits
-		reqs            map[string][]*cortexpbv2.WriteRequest
+		reqs            map[string][]*cortexpb.WriteRequestV2
 		expectedErr     error
 		expectedErrType interface{}
 	}{
 		"should succeed creating one user and series": {
 			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
-			reqs: map[string][]*cortexpbv2.WriteRequest{
+			reqs: map[string][]*cortexpb.WriteRequestV2{
 				"test": {
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}})},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 						nil,
-						[]cortexpbv2.Metadata{
-							{Type: cortexpbv2.METRIC_TYPE_COUNTER, HelpRef: 3},
+						[]cortexpb.MetadataV2{
+							{Type: cortexpb.COUNTER, HelpRef: 3},
 						},
-						cortexpbv2.API,
+						cortexpb.API,
 						"a help for metric_name_1"),
 				},
 			},
@@ -4019,21 +4015,21 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) {
 
 		"should fail creating two series": {
 			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
-			reqs: map[string][]*cortexpbv2.WriteRequest{
+			reqs: map[string][]*cortexpb.WriteRequestV2{
 				"test": {
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 10}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 				},
 			},
 
@@ -4042,23 +4038,23 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) {
 
 		"should fail creating two users": {
 			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1},
-			reqs: map[string][]*cortexpbv2.WriteRequest{
+			reqs: map[string][]*cortexpb.WriteRequestV2{
 				"user1": {
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 				},
 
 				"user2": {
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 10}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 				},
 			},
 			expectedErr: wrapWithUser(errMaxUsersLimitReached, "user2"),
@@ -4066,21 +4062,21 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) {
 
 		"should fail pushing samples in two requests due to rate limit": {
 			limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1, MaxIngestionRate: 0.001},
-			reqs: map[string][]*cortexpbv2.WriteRequest{
+			reqs: map[string][]*cortexpb.WriteRequestV2{
 				"user1": {
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 9}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 9}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 
-					cortexpbv2.ToWriteRequestV2(
+					cortexpb.ToWriteRequestV2(
 						[]labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})},
-						[]cortexpbv2.Sample{{Value: 1, Timestamp: 10}},
+						[]cortexpb.Sample{{Value: 1, TimestampMs: 10}},
 						nil,
 						nil,
-						cortexpbv2.API),
+						cortexpb.API),
 				},
 			},
 			expectedErr: errMaxSamplesPushRateLimitReached,
@@ -4229,30 +4225,30 @@ func TestIngesterPRW2_QueryExemplar_MaxInflightQueryRequest(t *testing.T) {
 	require.Equal(t, err, errTooManyInflightQueryRequests)
 }
 
-func generateSamplesForLabelV2(lbs labels.Labels, count int) *cortexpbv2.WriteRequest {
+func generateSamplesForLabelV2(lbs labels.Labels, count int) *cortexpb.WriteRequestV2 {
 	var lbls = make([]labels.Labels, 0, count)
-	var samples = make([]cortexpbv2.Sample, 0, count)
+	var samples = make([]cortexpb.Sample, 0, count)
 
 	for i := 0; i < count; i++ {
-		samples = append(samples, cortexpbv2.Sample{
-			Value:     float64(i),
-			Timestamp: int64(i),
+		samples = append(samples, cortexpb.Sample{
+			Value:       float64(i),
+			TimestampMs: int64(i),
 		})
 		lbls = append(lbls, lbs)
 	}
 
-	return cortexpbv2.ToWriteRequestV2(lbls, samples, nil, nil, cortexpbv2.API)
+	return cortexpb.ToWriteRequestV2(lbls, samples, nil, nil, cortexpb.API)
 }
 
-func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64, metadata cortexpbv2.Metadata, additionalSymbols ...string) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
-	samples := []cortexpbv2.Sample{
+func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64, metadata cortexpb.MetadataV2, additionalSymbols ...string) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) {
+	samples := []cortexpb.Sample{
 		{
-			Timestamp: timestamp,
-			Value:     value,
+			TimestampMs: timestamp,
+			Value:       value,
 		},
 	}
 
-	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, []cortexpbv2.Metadata{metadata}, cortexpbv2.API, additionalSymbols...)
+	req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, []cortexpb.MetadataV2{metadata}, cortexpb.API, additionalSymbols...)
 
 	chunk := chunkenc.NewXORChunk()
 	app, err := chunk.Appender()
@@ -4279,23 +4275,23 @@ func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value floa
 	return req, expectedQueryStreamResChunks
 }
 
-func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
+func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) {
 	var (
-		histograms []cortexpbv2.Histogram
+		histograms []cortexpb.Histogram
 		h          *histogram.Histogram
 		fh         *histogram.FloatHistogram
 		c          chunkenc.Chunk
 	)
 	if float {
 		fh = tsdbutil.GenerateTestFloatHistogram(value)
-		histograms = []cortexpbv2.Histogram{
-			cortexpbv2.FloatHistogramToHistogramProto(timestampMs, fh),
+		histograms = []cortexpb.Histogram{
+			cortexpb.FloatHistogramToHistogramProto(timestampMs, fh),
 		}
 		c = chunkenc.NewFloatHistogramChunk()
 	} else {
 		h = tsdbutil.GenerateTestHistogram(value)
-		histograms = []cortexpbv2.Histogram{
-			cortexpbv2.HistogramToHistogramProto(timestampMs, h),
+		histograms = []cortexpb.Histogram{
+			cortexpb.HistogramToHistogramProto(timestampMs, h),
 		}
 		c = chunkenc.NewHistogramChunk()
 	}
@@ -4310,7 +4306,7 @@ func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, ti
 	require.NoError(t, err)
 	c.Compact()
 
-	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, nil, histograms, nil, cortexpbv2.API)
+	req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, nil, histograms, nil, cortexpb.API)
 	enc := int32(encoding.PrometheusHistogramChunk)
 	if float {
 		enc = int32(encoding.PrometheusFloatHistogramChunk)
@@ -4334,15 +4330,15 @@ func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, ti
 	return req, expectedQueryStreamResChunks
 }
 
-func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) {
-	samples := []cortexpbv2.Sample{
+func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) {
+	samples := []cortexpb.Sample{
 		{
-			Timestamp: timestamp,
-			Value:     value,
+			TimestampMs: timestamp,
+			Value:       value,
 		},
 	}
 
-	req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, nil, cortexpbv2.API)
+	req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, nil, cortexpb.API)
 
 	chunk := chunkenc.NewXORChunk()
 	app, err := chunk.Appender()
@@ -4371,8 +4367,8 @@ func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timesta
 
 func pushSingleSampleWithMetadataV2(t *testing.T, i *Ingester) {
 	ctx := user.InjectOrgID(context.Background(), userID)
-	metadata := cortexpbv2.Metadata{
-		Type:    cortexpbv2.METRIC_TYPE_COUNTER,
+	metadata := cortexpb.MetadataV2{
+		Type:    cortexpb.COUNTER,
 		HelpRef: 3,
 		UnitRef: 0,
 	}
@@ -4389,16 +4385,16 @@ func pushSingleSampleAtTimeV2(t *testing.T, i *Ingester, ts int64) {
 	require.NoError(t, err)
 }
 
-func writeRequestSingleSeriesV2(lbls labels.Labels, samples []cortexpbv2.Sample) *cortexpbv2.WriteRequest {
-	req := &cortexpbv2.WriteRequest{
-		Source: cortexpbv2.API,
+func writeRequestSingleSeriesV2(lbls labels.Labels, samples []cortexpb.Sample) *cortexpb.WriteRequestV2 {
+	req := &cortexpb.WriteRequestV2{
+		Source: cortexpb.API,
 	}
 
 	st := writev2.NewSymbolTable()
-	ts := cortexpbv2.TimeSeries{}
+	ts := cortexpb.TimeSeriesV2{}
 	ts.Samples = samples
 	ts.LabelsRefs = st.SymbolizeLabels(lbls, nil)
-	req.Timeseries = append(req.Timeseries, cortexpbv2.PreallocTimeseriesV2{TimeSeries: &ts})
+	req.Timeseries = append(req.Timeseries, cortexpb.PreallocTimeseriesV2{TimeSeriesV2: &ts})
 	req.Symbols = st.Symbols()
 
 	return req
@@ -4430,21 +4426,21 @@ func createIngesterWithSeriesV2(t testing.TB, userID string, numSeries, numSampl
 
 			// Generate metrics and samples (1 for each series).
 			metrics := make([]labels.Labels, 0, batchSize)
-			samples := make([]cortexpbv2.Sample, 0, batchSize)
+			samples := make([]cortexpb.Sample, 0, batchSize)
 
 			for s := 0; s < batchSize; s++ {
 				metrics = append(metrics, labels.Labels{
 					{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", o+s)},
 				})
 
-				samples = append(samples, cortexpbv2.Sample{
-					Timestamp: ts,
-					Value:     1,
+				samples = append(samples, cortexpb.Sample{
+					TimestampMs: ts,
+					Value:       1,
 				})
 			}
 
 			// Send metrics to the ingester.
-			req := cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)
+			req := cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)
 			_, err := i.PushV2(ctx, req)
 			require.NoError(t, err)
 		}
@@ -4452,17 +4448,3 @@ func createIngesterWithSeriesV2(t testing.TB, userID string, numSeries, numSampl
 
 	return i
 }
-
-func benchmarkDataV2(nSeries int) (allLabels []labels.Labels, allSamples []cortexpbv2.Sample) {
-	for j := 0; j < nSeries; j++ {
-		labels := chunk.BenchmarkLabels.Copy()
-		for i := range labels {
-			if labels[i].Name == "cpu" {
-				labels[i].Value = fmt.Sprintf("cpu%02d", j)
-			}
-		}
-		allLabels = append(allLabels, labels)
-		allSamples = append(allSamples, cortexpbv2.Sample{Timestamp: 0, Value: float64(j)})
-	}
-	return
-}
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 4a2916c2af..c4fa7e92f6 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -22,7 +22,6 @@ import (
 	"github.com/weaveworks/common/user"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/querier"
 	"github.com/cortexproject/cortex/pkg/querier/stats"
 	"github.com/cortexproject/cortex/pkg/ring/client"
@@ -34,7 +33,7 @@ import (
 // Pusher is an ingester server that accepts pushes.
 type Pusher interface {
 	Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
-	PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+	PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error)
 }
 
 type PusherAppender struct {
diff --git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go
index 40b42f3fee..6032130283 100644
--- a/pkg/util/push/otlp_test.go
+++ b/pkg/util/push/otlp_test.go
@@ -352,7 +352,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
 	return pmetricotlp.NewExportRequestFromMetrics(d)
 }
 
-func verifyOTLPWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
+func verifyOTLPWriteRequestHandler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
 	t.Helper()
 	return func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
 		assert.Len(t, request.Timeseries, 13) // 1 (target_info) + 1 (counter) + 1 (gauge) + 7 (hist_bucket) + 2 (hist_sum, hist_count) + 1 (exponential histogram)
diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go
index c115f559ed..d8cbd79e88 100644
--- a/pkg/util/push/push.go
+++ b/pkg/util/push/push.go
@@ -14,7 +14,6 @@ import (
 	"github.com/weaveworks/common/middleware"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/log"
 )
@@ -36,7 +35,7 @@ const (
 type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
 
 // FuncV2 defines the type of the pushV2. It is similar to http.HandlerFunc.
-type FuncV2 func(ctx context.Context, request *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error)
+type FuncV2 func(ctx context.Context, request *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error)
 
 // Handler is a http.Handler which accepts WriteRequests.
 func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func, pushV2 FuncV2) http.Handler {
@@ -107,7 +106,7 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F
 				http.Error(w, string(resp.Body), int(resp.Code))
 			}
 		case config.RemoteWriteProtoMsgV2:
-			var req cortexpbv2.WriteRequest
+			var req cortexpb.WriteRequestV2
 			err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy)
 			if err != nil {
 				fmt.Println("err", err)
@@ -118,7 +117,7 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F
 
 			req.SkipLabelNameValidation = false
 			if req.Source == 0 {
-				req.Source = cortexpbv2.API
+				req.Source = cortexpb.API
 			}
 
 			if resp, err := pushV2(ctx, &req); err != nil {
diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go
index 178f2346c9..e4839cec93 100644
--- a/pkg/util/push/push_test.go
+++ b/pkg/util/push/push_test.go
@@ -16,11 +16,10 @@ import (
 	"github.com/weaveworks/common/middleware"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 func TestHandler_remoteWrite(t *testing.T) {
-	handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API))
+	handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpb.API))
 
 	t.Run("remote write v1", func(t *testing.T) {
 		req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false)
@@ -45,7 +44,7 @@ func TestHandler_remoteWrite(t *testing.T) {
 
 func TestHandler_ContentTypeAndEncoding(t *testing.T) {
 	sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)")
-	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API))
+	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpb.API))
 
 	tests := []struct {
 		description  string
@@ -149,7 +148,7 @@ func TestHandler_ContentTypeAndEncoding(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
 			if test.isV2 {
-				req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.API))
+				req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API))
 				resp := httptest.NewRecorder()
 				handler.ServeHTTP(resp, req)
 				assert.Equal(t, test.expectedCode, resp.Code)
@@ -165,7 +164,7 @@ func TestHandler_ContentTypeAndEncoding(t *testing.T) {
 
 func TestHandler_cortexWriteRequest(t *testing.T) {
 	sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)")
-	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE))
+	handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpb.RULE))
 
 	t.Run("remote write v1", func(t *testing.T) {
 		req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false)
@@ -174,7 +173,7 @@ func TestHandler_cortexWriteRequest(t *testing.T) {
 		assert.Equal(t, 200, resp.Code)
 	})
 	t.Run("remote write v2", func(t *testing.T) {
-		req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true)
+		req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.RULE), true)
 		resp := httptest.NewRecorder()
 		handler.ServeHTTP(resp, req)
 		assert.Equal(t, 200, resp.Code)
@@ -185,25 +184,25 @@ func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) {
 	for _, req := range []*http.Request{
 		createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false),
 		createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false),
-		createRequest(t, createCortexRemoteWriteV2Protobuf(t, true, cortexpbv2.RULE), true),
-		createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true),
+		createRequest(t, createCortexRemoteWriteV2Protobuf(t, true, cortexpb.RULE), true),
+		createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.RULE), true),
 	} {
 		resp := httptest.NewRecorder()
-		handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE))
+		handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpb.RULE))
 		handler.ServeHTTP(resp, req)
 		assert.Equal(t, 200, resp.Code)
 	}
 }
 
-func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpbv2.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) {
+func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequestV2) (response *cortexpb.WriteResponseV2, err error) {
 	t.Helper()
-	return func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) {
+	return func(ctx context.Context, request *cortexpb.WriteRequestV2) (response *cortexpb.WriteResponseV2, err error) {
 		assert.Len(t, request.Timeseries, 1)
 		assert.Equal(t, "__name__", request.Symbols[1])
 		assert.Equal(t, "foo", request.Symbols[2])
 		assert.Equal(t, expectSource, request.Source)
 		assert.False(t, request.SkipLabelNameValidation)
-		resp := &cortexpbv2.WriteResponse{
+		resp := &cortexpb.WriteResponseV2{
 			Samples:    1,
 			Histograms: 1,
 			Exemplars:  1,
@@ -212,7 +211,7 @@ func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpbv2.WriteRequ
 	}
 }
 
-func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
+func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
 	t.Helper()
 	return func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) {
 		assert.Len(t, request.Timeseries, 1)
@@ -255,16 +254,16 @@ func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request {
 	return req
 }
 
-func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpbv2.WriteRequest_SourceEnum) []byte {
+func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.SourceEnum) []byte {
 	t.Helper()
-	input := cortexpbv2.WriteRequest{
+	input := cortexpb.WriteRequestV2{
 		Symbols: []string{"", "__name__", "foo"},
-		Timeseries: []cortexpbv2.PreallocTimeseriesV2{
+		Timeseries: []cortexpb.PreallocTimeseriesV2{
 			{
-				TimeSeries: &cortexpbv2.TimeSeries{
+				TimeSeriesV2: &cortexpb.TimeSeriesV2{
 					LabelsRefs: []uint32{1, 2},
-					Samples: []cortexpbv2.Sample{
-						{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+					Samples: []cortexpb.Sample{
+						{Value: 1, TimestampMs: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
 					},
 				},
 			},
@@ -315,7 +314,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte {
 	require.NoError(t, err)
 	return inoutBytes
 }
-func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte {
+func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.SourceEnum) []byte {
 	t.Helper()
 	ts := cortexpb.PreallocTimeseries{
 		TimeSeries: &cortexpb.TimeSeries{
diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go
index dda50dddb3..292fd273cd 100644
--- a/pkg/util/validation/validate.go
+++ b/pkg/util/validation/validate.go
@@ -17,7 +17,6 @@ import (
 	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/extract"
 )
@@ -154,7 +153,7 @@ func ValidateSampleTimestamp(validateMetrics *ValidateMetrics, limits *Limits, u
 
 // ValidateExemplarV2 returns an error if the exemplar is invalid.
 // The returned error may retain the provided series labels.
-func ValidateExemplarV2(validateMetrics *ValidateMetrics, symbols []string, userID string, seriesLabels []cortexpb.LabelAdapter, e *cortexpbv2.Exemplar, b labels.ScratchBuilder, st *writev2.SymbolsTable) ValidationError {
+func ValidateExemplarV2(validateMetrics *ValidateMetrics, symbols []string, userID string, seriesLabels []cortexpb.LabelAdapter, e *cortexpb.ExemplarV2, b labels.ScratchBuilder, st *writev2.SymbolsTable) ValidationError {
 	lbs := e.ToLabels(&b, symbols)
 	// symbolize exemplar labels
 	e.LabelsRefs = st.SymbolizeLabels(lbs, nil)
@@ -290,7 +289,7 @@ func ValidateLabels(validateMetrics *ValidateMetrics, limits *Limits, userID str
 }
 
 // ValidateMetadataV2 returns an error if the metric metadata is invalid.
-func ValidateMetadataV2(validateMetrics *ValidateMetrics, cfg *Limits, userID string, symbols []string, metadata *cortexpbv2.Metadata, st *writev2.SymbolsTable) error {
+func ValidateMetadataV2(validateMetrics *ValidateMetrics, cfg *Limits, userID string, symbols []string, metadata *cortexpb.MetadataV2, st *writev2.SymbolsTable) error {
 	help := symbols[metadata.HelpRef]
 	unit := symbols[metadata.UnitRef]
 
@@ -353,67 +352,6 @@ func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID stri
 	return nil
 }
 
-func ValidateNativeHistogramV2(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpbv2.Histogram) (cortexpbv2.Histogram, error) {
-	if limits.MaxNativeHistogramBuckets == 0 {
-		return histogramSample, nil
-	}
-
-	var (
-		exceedLimit bool
-	)
-	if histogramSample.IsFloatHistogram() {
-		// Initial check to see if the bucket limit is exceeded or not. If not, we can avoid type casting.
-		exceedLimit = len(histogramSample.PositiveCounts)+len(histogramSample.NegativeCounts) > limits.MaxNativeHistogramBuckets
-		if !exceedLimit {
-			return histogramSample, nil
-		}
-		// Exceed limit.
-		if histogramSample.Schema <= histogram.ExponentialSchemaMin {
-			validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
-			return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
-		}
-		fh := cortexpbv2.FloatHistogramProtoToFloatHistogram(histogramSample)
-		oBuckets := len(fh.PositiveBuckets) + len(fh.NegativeBuckets)
-		for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
-			if fh.Schema <= histogram.ExponentialSchemaMin {
-				validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
-				return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
-			}
-			fh = fh.ReduceResolution(fh.Schema - 1)
-		}
-		if oBuckets != len(fh.PositiveBuckets)+len(fh.NegativeBuckets) {
-			validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
-		}
-		// If resolution reduced, convert new float histogram to protobuf type again.
-		return cortexpbv2.FloatHistogramToHistogramProto(histogramSample.Timestamp, fh), nil
-	}
-
-	// Initial check to see if bucket limit is exceeded or not. If not, we can avoid type casting.
-	exceedLimit = len(histogramSample.PositiveDeltas)+len(histogramSample.NegativeDeltas) > limits.MaxNativeHistogramBuckets
-	if !exceedLimit {
-		return histogramSample, nil
-	}
-	// Exceed limit.
-	if histogramSample.Schema <= histogram.ExponentialSchemaMin {
-		validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
-		return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
-	}
-	h := cortexpbv2.HistogramProtoToHistogram(histogramSample)
-	oBuckets := len(h.PositiveBuckets) + len(h.NegativeBuckets)
-	for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
-		if h.Schema <= histogram.ExponentialSchemaMin {
-			validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
-			return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
-		}
-		h = h.ReduceResolution(h.Schema - 1)
-	}
-	if oBuckets != len(h.PositiveBuckets)+len(h.NegativeBuckets) {
-		validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
-	}
-	// If resolution reduced, convert new histogram to protobuf type again.
-	return cortexpbv2.HistogramToHistogramProto(histogramSample.Timestamp, h), nil
-}
-
 func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpb.Histogram) (cortexpb.Histogram, error) {
 	if limits.MaxNativeHistogramBuckets == 0 {
 		return histogramSample, nil

From 0bee591f5300f5e415f7954b8a46f6969a62ee07 Mon Sep 17 00:00:00 2001
From: SungJin1212 <tjdwls1201@gmail.com>
Date: Mon, 11 Nov 2024 12:09:10 +0900
Subject: [PATCH 6/6] Add rolling update e2e test

Signed-off-by: SungJin1212 <tjdwls1201@gmail.com>
---
 integration/remote_write_v2_test.go     | 106 ++++++++++++++++++++++++
 pkg/cortex/modules_test.go              |   3 +-
 pkg/cortexpb/cortex.proto               |   2 +-
 pkg/cortexpb/slicesPool.go              |   6 +-
 pkg/cortexpb/slicesPool_test.go         |  12 +--
 pkg/cortexpb/timeseries.go              |   6 +-
 pkg/cortexpb/timeseriesv2.go            |   6 +-
 pkg/ingester/client/cortex_mock_test.go |   5 +-
 pkg/querier/tripperware/query.pb.go     |   1 -
 pkg/ruler/compat_test.go                |   7 +-
 pkg/ruler/pusher_mock_test.go           |   5 +-
 11 files changed, 130 insertions(+), 29 deletions(-)

diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go
index 775ccb98bb..9afde5e35a 100644
--- a/integration/remote_write_v2_test.go
+++ b/integration/remote_write_v2_test.go
@@ -23,6 +23,112 @@ import (
 	"github.com/cortexproject/cortex/pkg/storage/tsdb"
 )
 
+func TestIngesterRollingUpdate(t *testing.T) {
+
+	const blockRangePeriod = 5 * time.Second
+	nonPushV2SupportImage := "quay.io/cortexproject/cortex:v1.18.1"
+
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul := e2edb.NewConsulWithName("consul")
+	require.NoError(t, s.StartAndWaitReady(consul))
+
+	flags := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine":                                     blocksStorageEngine,
+			"-blocks-storage.backend":                           "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval":     "4m",
+			"-blocks-storage.bucket-store.sync-interval":        "15m",
+			"-blocks-storage.bucket-store.index-cache.backend":  tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled":           "true",
+			"-blocks-storage.tsdb.block-ranges-period":          blockRangePeriod.String(),
+			"-blocks-storage.tsdb.ship-interval":                "1s",
+			"-blocks-storage.tsdb.retention-period":             ((blockRangePeriod * 2) - 1).String(),
+			"-blocks-storage.tsdb.enable-native-histograms":     "true",
+			// Ingester.
+			"-ring.store":      "consul",
+			"-consul.hostname": consul.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	path := path.Join(s.SharedDir(), "cortex-1")
+
+	flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path})
+	// Start Cortex replicas.
+	// Start all other services.
+	ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, nonPushV2SupportImage)
+	distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{
+		"-querier.store-gateway-addresses": storeGateway.NetworkGRPCEndpoint()}), "")
+
+	require.NoError(t, s.StartAndWaitReady(querier, ingester, distributor, storeGateway))
+
+	// Wait until Cortex replicas have updated the ring state.
+	require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+	require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+
+	c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	now := time.Now()
+
+	// series push
+	symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"})
+	res, err := c.PushV2(symbols1, series)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	// sample
+	result, err := c.Query("test_series", now)
+	require.NoError(t, err)
+	assert.Equal(t, expectedVector, result.(model.Vector))
+
+	// metadata
+	metadata, err := c.Metadata("test_series", "")
+	require.NoError(t, err)
+	require.Equal(t, 1, len(metadata["test_series"]))
+
+	// histogram
+	histogramIdx := rand.Uint32()
+	symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"})
+	res, err = c.PushV2(symbols2, histogramSeries)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"})
+	res, err = c.PushV2(symbols3, histogramFloatSeries)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	testHistogramTimestamp := now.Add(blockRangePeriod * 2)
+	expectedHistogram := tsdbutil.GenerateTestHistogram(int(histogramIdx))
+	result, err = c.Query(`test_histogram`, testHistogramTimestamp)
+	require.NoError(t, err)
+	require.Equal(t, model.ValVector, result.Type())
+	v := result.(model.Vector)
+	require.Equal(t, 2, v.Len())
+	for _, s := range v {
+		require.NotNil(t, s.Histogram)
+		require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count))
+		require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum))
+	}
+}
+
 func TestIngest(t *testing.T) {
 	const blockRangePeriod = 5 * time.Second
 
diff --git a/pkg/cortex/modules_test.go b/pkg/cortex/modules_test.go
index ae5fc64582..2cf79132cb 100644
--- a/pkg/cortex/modules_test.go
+++ b/pkg/cortex/modules_test.go
@@ -13,7 +13,6 @@ import (
 	"github.com/weaveworks/common/server"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 func changeTargetConfig(c *Config) {
@@ -161,7 +160,7 @@ func TestCortex_InitRulerStorage(t *testing.T) {
 
 type myPusher struct{}
 
-func (p *myPusher) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (p *myPusher) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	return nil, nil
 }
 
diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto
index b16074c007..c7b1209503 100644
--- a/pkg/cortexpb/cortex.proto
+++ b/pkg/cortexpb/cortex.proto
@@ -28,7 +28,7 @@ enum MetricType {
 // https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto
 message WriteRequestV2 {
   reserved 1 to 2;
-  cortexpb.SourceEnum Source = 3;
+  SourceEnum Source = 3;
   repeated string symbols = 4;
   repeated TimeSeriesV2 timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"];
 
diff --git a/pkg/cortexpb/slicesPool.go b/pkg/cortexpb/slicesPool.go
index eaace237ce..e28d51d4f2 100644
--- a/pkg/cortexpb/slicesPool.go
+++ b/pkg/cortexpb/slicesPool.go
@@ -13,7 +13,7 @@ type byteSlicePools struct {
 	pools []sync.Pool
 }
 
-func NewSlicePool(pools int) *byteSlicePools {
+func newSlicePool(pools int) *byteSlicePools {
 	sp := byteSlicePools{}
 	sp.init(pools)
 	return &sp
@@ -32,7 +32,7 @@ func (sp *byteSlicePools) init(pools int) {
 	}
 }
 
-func (sp *byteSlicePools) GetSlice(size int) *[]byte {
+func (sp *byteSlicePools) getSlice(size int) *[]byte {
 	index := int(math.Ceil(math.Log2(float64(size)))) - minPoolSizePower
 
 	if index >= len(sp.pools) {
@@ -50,7 +50,7 @@ func (sp *byteSlicePools) GetSlice(size int) *[]byte {
 	return s
 }
 
-func (sp *byteSlicePools) ReuseSlice(s *[]byte) {
+func (sp *byteSlicePools) reuseSlice(s *[]byte) {
 	index := int(math.Floor(math.Log2(float64(cap(*s))))) - minPoolSizePower
 
 	if index >= len(sp.pools) || index < 0 {
diff --git a/pkg/cortexpb/slicesPool_test.go b/pkg/cortexpb/slicesPool_test.go
index dd35beb33a..9bc56cdec3 100644
--- a/pkg/cortexpb/slicesPool_test.go
+++ b/pkg/cortexpb/slicesPool_test.go
@@ -9,22 +9,22 @@ import (
 )
 
 func TestFuzzyByteSlicePools(t *testing.T) {
-	sut := NewSlicePool(20)
+	sut := newSlicePool(20)
 	maxByteSize := int(math.Pow(2, 20+minPoolSizePower-1))
 
 	for i := 0; i < 1000; i++ {
 		size := rand.Int() % maxByteSize
-		s := sut.GetSlice(size)
+		s := sut.getSlice(size)
 		assert.Equal(t, len(*s), size)
-		sut.ReuseSlice(s)
+		sut.reuseSlice(s)
 	}
 }
 
 func TestReturnSliceSmallerThanMin(t *testing.T) {
-	sut := NewSlicePool(20)
+	sut := newSlicePool(20)
 	size := 3
 	buff := make([]byte, 0, size)
-	sut.ReuseSlice(&buff)
-	buff2 := sut.GetSlice(size * 2)
+	sut.reuseSlice(&buff)
+	buff2 := sut.getSlice(size * 2)
 	assert.Equal(t, len(*buff2), size*2)
 }
diff --git a/pkg/cortexpb/timeseries.go b/pkg/cortexpb/timeseries.go
index b880739ae2..db7354ffe4 100644
--- a/pkg/cortexpb/timeseries.go
+++ b/pkg/cortexpb/timeseries.go
@@ -47,7 +47,7 @@ var (
 			}
 		},
 	}
-	bytePool = NewSlicePool(20)
+	bytePool = newSlicePool(20)
 )
 
 // PreallocConfig configures how structures will be preallocated to optimise
@@ -86,7 +86,7 @@ func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
 
 func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
 	size := p.Size()
-	p.data = bytePool.GetSlice(size)
+	p.data = bytePool.getSlice(size)
 	dAtA = *p.data
 	n, err := p.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
@@ -97,7 +97,7 @@ func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
 
 func ReuseWriteRequest(req *PreallocWriteRequest) {
 	if req.data != nil {
-		bytePool.ReuseSlice(req.data)
+		bytePool.reuseSlice(req.data)
 		req.data = nil
 	}
 	req.Source = 0
diff --git a/pkg/cortexpb/timeseriesv2.go b/pkg/cortexpb/timeseriesv2.go
index 28eca0f973..2d64b21d7c 100644
--- a/pkg/cortexpb/timeseriesv2.go
+++ b/pkg/cortexpb/timeseriesv2.go
@@ -34,7 +34,7 @@ var (
 			}
 		},
 	}
-	bytePoolV2 = NewSlicePool(20)
+	bytePoolV2 = newSlicePool(20)
 )
 
 // PreallocWriteRequestV2 is a WriteRequest which preallocs slices on Unmarshal.
@@ -51,7 +51,7 @@ func (p *PreallocWriteRequestV2) Unmarshal(dAtA []byte) error {
 
 func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) {
 	size := p.Size()
-	p.data = bytePool.GetSlice(size)
+	p.data = bytePool.getSlice(size)
 	dAtA = *p.data
 	n, err := p.MarshalToSizedBuffer(dAtA[:size])
 	if err != nil {
@@ -73,7 +73,7 @@ func (p *PreallocTimeseriesV2) Unmarshal(dAtA []byte) error {
 
 func ReuseWriteRequestV2(req *PreallocWriteRequestV2) {
 	if req.data != nil {
-		bytePoolV2.ReuseSlice(req.data)
+		bytePoolV2.reuseSlice(req.data)
 		req.data = nil
 	}
 	req.Source = 0
diff --git a/pkg/ingester/client/cortex_mock_test.go b/pkg/ingester/client/cortex_mock_test.go
index 5aff8bf302..d378495dcc 100644
--- a/pkg/ingester/client/cortex_mock_test.go
+++ b/pkg/ingester/client/cortex_mock_test.go
@@ -6,16 +6,15 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 type IngesterServerMock struct {
 	mock.Mock
 }
 
-func (m *IngesterServerMock) PushV2(ctx context.Context, r *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (m *IngesterServerMock) PushV2(ctx context.Context, r *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	args := m.Called(ctx, r)
-	return args.Get(0).(*cortexpbv2.WriteResponse), args.Error(1)
+	return args.Get(0).(*cortexpb.WriteResponseV2), args.Error(1)
 }
 
 func (m *IngesterServerMock) Push(ctx context.Context, r *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
diff --git a/pkg/querier/tripperware/query.pb.go b/pkg/querier/tripperware/query.pb.go
index 2e16fc9c6d..5fc41d7ed9 100644
--- a/pkg/querier/tripperware/query.pb.go
+++ b/pkg/querier/tripperware/query.pb.go
@@ -737,7 +737,6 @@ func (m *PrometheusResponseHeader) GetValues() []string {
 
 type PrometheusQueryResult struct {
 	// Types that are valid to be assigned to Result:
-	//
 	//	*PrometheusQueryResult_Vector
 	//	*PrometheusQueryResult_RawBytes
 	//	*PrometheusQueryResult_Matrix
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index 7d489b65b0..21f76be12c 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -23,20 +23,19 @@ import (
 	"github.com/weaveworks/common/httpgrpc"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 	"github.com/cortexproject/cortex/pkg/querier/stats"
 	"github.com/cortexproject/cortex/pkg/util/validation"
 )
 
 type fakePusher struct {
 	request    *cortexpb.WriteRequest
-	requestV2  *cortexpbv2.WriteRequest
+	requestV2  *cortexpb.WriteRequestV2
 	response   *cortexpb.WriteResponse
-	responseV2 *cortexpbv2.WriteResponse
+	responseV2 *cortexpb.WriteResponseV2
 	err        error
 }
 
-func (p *fakePusher) PushV2(ctx context.Context, r *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (p *fakePusher) PushV2(ctx context.Context, r *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	p.requestV2 = r
 	return p.responseV2, p.err
 }
diff --git a/pkg/ruler/pusher_mock_test.go b/pkg/ruler/pusher_mock_test.go
index 02fc5b13de..c909e66d22 100644
--- a/pkg/ruler/pusher_mock_test.go
+++ b/pkg/ruler/pusher_mock_test.go
@@ -6,7 +6,6 @@ import (
 	"github.com/stretchr/testify/mock"
 
 	"github.com/cortexproject/cortex/pkg/cortexpb"
-	"github.com/cortexproject/cortex/pkg/cortexpbv2"
 )
 
 type pusherMock struct {
@@ -17,9 +16,9 @@ func newPusherMock() *pusherMock {
 	return &pusherMock{}
 }
 
-func (m *pusherMock) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) {
+func (m *pusherMock) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) {
 	args := m.Called(ctx, req)
-	return args.Get(0).(*cortexpbv2.WriteResponse), args.Error(1)
+	return args.Get(0).(*cortexpb.WriteResponseV2), args.Error(1)
 }
 
 func (m *pusherMock) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {