diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 7ca90a4185e..fe475d5ed09 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -2557,7 +2557,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through
 
 	result := make([]labels.Labels, 0, len(metrics))
 	for _, m := range metrics {
-		if err := queryLimiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(m)); err != nil {
+		if err := queryLimiter.AddSeries(m); err != nil {
 			return nil, err
 		}
 		result = append(result, m)
diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go
index 73610f6e089..ee381656840 100644
--- a/pkg/distributor/query.go
+++ b/pkg/distributor/query.go
@@ -268,7 +268,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
 
 			if len(resp.Timeseries) > 0 {
 				for _, series := range resp.Timeseries {
-					if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil {
+					if limitErr := queryLimiter.AddSeries(mimirpb.FromLabelAdaptersToLabels(series.Labels)); limitErr != nil {
 						return ingesterQueryResult{}, limitErr
 					}
 				}
@@ -285,7 +285,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
 				}
 
 				for _, series := range resp.Chunkseries {
-					if err := queryLimiter.AddSeries(series.Labels); err != nil {
+					if err := queryLimiter.AddSeries(mimirpb.FromLabelAdaptersToLabels(series.Labels)); err != nil {
 						return ingesterQueryResult{}, err
 					}
 				}
@@ -300,7 +300,9 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
 				streamingSeriesCount += len(resp.StreamingSeries)
 
 				for _, s := range resp.StreamingSeries {
-					if err := queryLimiter.AddSeries(s.Labels); err != nil {
+					l := mimirpb.FromLabelAdaptersToLabels(s.Labels)
+
+					if err := queryLimiter.AddSeries(l); err != nil {
 						return ingesterQueryResult{}, err
 					}
 
@@ -313,7 +315,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSets [
 						return ingesterQueryResult{}, err
 					}
 
-					labelsBatch = append(labelsBatch, mimirpb.FromLabelAdaptersToLabels(s.Labels))
+					labelsBatch = append(labelsBatch, l)
 				}
 
 				streamingSeriesBatches = append(streamingSeriesBatches, labelsBatch)
diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go
index 3d917a3a02e..a6f17182594 100644
--- a/pkg/querier/blocks_store_queryable.go
+++ b/pkg/querier/blocks_store_queryable.go
@@ -813,7 +813,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor
 				mySeries = append(mySeries, s)
 
 				// Add series fingerprint to query limiter; will return error if we are over the limit
-				if err := queryLimiter.AddSeries(s.Labels); err != nil {
+				if err := queryLimiter.AddSeries(mimirpb.FromLabelAdaptersToLabels(s.Labels)); err != nil {
 					return err
 				}
 
@@ -854,14 +854,18 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor
 
 			if ss := resp.GetStreamingSeries(); ss != nil {
 				myStreamingSeriesLabels = slices.Grow(myStreamingSeriesLabels, len(ss.Series))
+
 				for _, s := range ss.Series {
 					// Add series fingerprint to query limiter; will return error if we are over the limit
-					if limitErr := queryLimiter.AddSeries(s.Labels); limitErr != nil {
+					l := mimirpb.FromLabelAdaptersToLabels(s.Labels)
+
+					if limitErr := queryLimiter.AddSeries(l); limitErr != nil {
 						return limitErr
 					}
 
-					myStreamingSeriesLabels = append(myStreamingSeriesLabels, mimirpb.FromLabelAdaptersToLabels(s.Labels))
+					myStreamingSeriesLabels = append(myStreamingSeriesLabels, l)
 				}
+
 				if ss.IsEndOfSeriesStream {
 					// If we aren't expecting any series from this stream, close it now.
 					if len(myStreamingSeriesLabels) == 0 {
diff --git a/pkg/util/limiter/query_limiter.go b/pkg/util/limiter/query_limiter.go
index fcd61b88869..0a7333f7721 100644
--- a/pkg/util/limiter/query_limiter.go
+++ b/pkg/util/limiter/query_limiter.go
@@ -9,9 +9,9 @@ import (
 	"context"
 	"sync"
 
+	"github.com/prometheus/prometheus/model/labels"
 	"go.uber.org/atomic"
 
-	"github.com/grafana/mimir/pkg/mimirpb"
 	"github.com/grafana/mimir/pkg/querier/stats"
 	"github.com/grafana/mimir/pkg/util/validation"
 )
@@ -74,12 +74,12 @@ func QueryLimiterFromContextWithFallback(ctx context.Context) *QueryLimiter {
 }
 
 // AddSeries adds the input series and returns an error if the limit is reached.
-func (ql *QueryLimiter) AddSeries(seriesLabels []mimirpb.LabelAdapter) validation.LimitError {
+func (ql *QueryLimiter) AddSeries(seriesLabels labels.Labels) validation.LimitError {
 	// If the max series is unlimited just return without managing map
 	if ql.maxSeriesPerQuery == 0 {
 		return nil
 	}
-	fingerprint := mimirpb.FromLabelAdaptersToLabels(seriesLabels).Hash()
+	fingerprint := seriesLabels.Hash()
 
 	ql.uniqueSeriesMx.Lock()
 	defer ql.uniqueSeriesMx.Unlock()
diff --git a/pkg/util/limiter/query_limiter_test.go b/pkg/util/limiter/query_limiter_test.go
index 0d8041e4e4d..73179122241 100644
--- a/pkg/util/limiter/query_limiter_test.go
+++ b/pkg/util/limiter/query_limiter_test.go
@@ -16,7 +16,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/grafana/mimir/pkg/mimirpb"
 	"github.com/grafana/mimir/pkg/querier/stats"
 )
 
@@ -37,15 +36,15 @@ func TestQueryLimiter_AddSeries_ShouldReturnNoErrorOnLimitNotExceeded(t *testing
 		reg     = prometheus.NewPedanticRegistry()
 		limiter = NewQueryLimiter(100, 0, 0, 0, stats.NewQueryMetrics(reg))
 	)
-	err := limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series1))
+	err := limiter.AddSeries(series1)
 	assert.NoError(t, err)
-	err = limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series2))
+	err = limiter.AddSeries(series2)
 	assert.NoError(t, err)
 	assert.Equal(t, 2, limiter.uniqueSeriesCount())
 	assertRejectedQueriesMetricValue(t, reg, 0, 0, 0, 0)
 
 	// Re-add previous series to make sure it's not double counted
-	err = limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series1))
+	err = limiter.AddSeries(series1)
 	assert.NoError(t, err)
 	assert.Equal(t, 2, limiter.uniqueSeriesCount())
 	assertRejectedQueriesMetricValue(t, reg, 0, 0, 0, 0)
@@ -72,21 +71,21 @@ func TestQueryLimiter_AddSeries_ShouldReturnErrorOnLimitExceeded(t *testing.T) {
 		reg     = prometheus.NewPedanticRegistry()
 		limiter = NewQueryLimiter(1, 0, 0, 0, stats.NewQueryMetrics(reg))
 	)
-	err := limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series1))
+	err := limiter.AddSeries(series1)
 	require.NoError(t, err)
 	assertRejectedQueriesMetricValue(t, reg, 0, 0, 0, 0)
 
-	err = limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series2))
+	err = limiter.AddSeries(series2)
 	require.Error(t, err)
 	assertRejectedQueriesMetricValue(t, reg, 1, 0, 0, 0)
 
 	// Add the same series again and ensure that we don't increment the failed queries metric again.
-	err = limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series2))
+	err = limiter.AddSeries(series2)
 	require.Error(t, err)
 	assertRejectedQueriesMetricValue(t, reg, 1, 0, 0, 0)
 
 	// Add another series and ensure that we don't increment the failed queries metric again.
-	err = limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(series3))
+	err = limiter.AddSeries(series3)
 	require.Error(t, err)
 	assertRejectedQueriesMetricValue(t, reg, 1, 0, 0, 0)
 }
@@ -188,7 +187,7 @@ func BenchmarkQueryLimiter_AddSeries(b *testing.B) {
 	reg := prometheus.NewPedanticRegistry()
 	limiter := NewQueryLimiter(b.N+1, 0, 0, 0, stats.NewQueryMetrics(reg))
 	for _, s := range series {
-		err := limiter.AddSeries(mimirpb.FromLabelsToLabelAdapters(s))
+		err := limiter.AddSeries(s)
 		assert.NoError(b, err)
 	}
 }
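
Not part of the diff: a minimal sketch of how a caller exercises the new AddSeries(labels.Labels) signature, assuming the import path github.com/grafana/mimir/pkg/util/limiter for the limiter package and the 5-argument NewQueryLimiter constructor seen in the tests above; the series values are hypothetical and only illustrate hitting the per-query series limit.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/mimir/pkg/querier/stats"
	"github.com/grafana/mimir/pkg/util/limiter"
)

func main() {
	// Allow at most 1 unique series per query; the other limits are disabled (0).
	reg := prometheus.NewPedanticRegistry()
	ql := limiter.NewQueryLimiter(1, 0, 0, 0, stats.NewQueryMetrics(reg))

	// Callers now pass labels.Labels directly; converting from
	// []mimirpb.LabelAdapter (mimirpb.FromLabelAdaptersToLabels) is only needed
	// when the series arrived over the wire, as in the distributor/querier
	// changes above.
	series1 := labels.FromStrings("__name__", "up", "job", "demo")
	fmt.Println(ql.AddSeries(series1)) // nil: first unique series is within the limit

	series2 := labels.FromStrings("__name__", "up", "job", "other")
	fmt.Println(ql.AddSeries(series2)) // limit error: second unique series exceeds the max of 1
}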