From 76ab843e3bb3d6517fcd1ee47e919bd9d107ef0d Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Mon, 9 Dec 2024 21:01:11 +1100 Subject: [PATCH] Vendor update mimir-prometheus at aa96f2e80ba9 (#10168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Vendor update mimir-prometheus at aa96f2e80ba9 Which is upstream Prometheus at b9dd95f499aa2912d9cfbcfddfbdd6bc0a5207a5 * Do not use new limit in storage.NewMergeSeriesSet * MQE: Fix our test cases Mostly just annotation infos. * MQE: re-sync upstream tests * Update otlp * Update changelog Signed-off-by: György Krajcsovits --------- Signed-off-by: György Krajcsovits Co-authored-by: György Krajcsovits --- CHANGELOG.md | 1 + go.mod | 6 +- go.sum | 16 +- .../otlp/metrics_to_prw_generated.go | 17 ++ .../otlp_to_openmetrics_metadata_generated.go | 35 --- pkg/ingester/ingester.go | 2 +- pkg/querier/blocks_store_queryable.go | 2 +- pkg/querier/distributor_queryable.go | 2 +- pkg/querier/querier.go | 4 +- .../tenantfederation/merge_queryable.go | 2 +- .../testdata/ours/binary_operators.test | 44 +-- .../testdata/ours/functions.test | 98 +++--- .../testdata/ours/histograms.test | 16 +- .../testdata/ours/native_histograms.test | 6 +- .../testdata/upstream/aggregators.test | 30 +- .../testdata/upstream/at_modifier.test | 3 +- .../testdata/upstream/functions.test | 138 ++++----- .../testdata/upstream/histograms.test | 8 +- .../upstream/name_label_dropping.test | 96 +++--- .../testdata/upstream/native_histograms.test | 20 ++ .../testdata/upstream/operators.test | 281 ++++++++++-------- .../testdata/upstream/selectors.test | 172 +++++------ .../testdata/upstream/subquery.test | 88 +++--- .../prometheus/model/relabel/relabel.go | 34 ++- .../prometheus/model/textparse/nhcbparse.go | 34 ++- .../model/textparse/openmetricslex.l | 1 + .../model/textparse/openmetricslex.l.go | 130 +++++--- .../model/textparse/openmetricsparse.go | 2 +- .../prometheus/notifier/notifier.go | 8 +- .../prometheus/prometheus/promql/engine.go | 8 +- .../prometheus/prometheus/promql/functions.go | 46 +-- .../prometheus/promql/promqltest/test.go | 45 +-- .../promqltest/testdata/aggregators.test | 30 +- .../promqltest/testdata/at_modifier.test | 3 +- .../promql/promqltest/testdata/functions.test | 138 ++++----- .../promqltest/testdata/histograms.test | 8 +- .../testdata/name_label_dropping.test | 96 +++--- .../testdata/native_histograms.test | 20 ++ .../promql/promqltest/testdata/operators.test | 281 ++++++++++-------- .../promql/promqltest/testdata/selectors.test | 172 +++++------ .../promql/promqltest/testdata/subquery.test | 84 +++--- .../prometheus/prometheus/storage/merge.go | 104 ++++--- .../prometheusremotewrite/metrics_to_prw.go | 17 ++ .../otlp_to_openmetrics_metadata.go | 34 --- .../prometheusremotewrite/timeseries.go | 5 + .../storage/remote/write_handler.go | 1 + .../prometheus/prometheus/tsdb/compact.go | 4 +- .../prometheus/prometheus/tsdb/head_wal.go | 45 +-- .../prometheus/tsdb/index/postings.go | 6 +- .../prometheus/tsdb/wlog/watcher.go | 4 +- .../util/convertnhcb/convertnhcb.go | 39 +-- .../prometheus/util/logging/dedupe.go | 2 + .../prometheus/prometheus/web/api/v1/api.go | 4 +- vendor/modules.txt | 8 +- 54 files changed, 1338 insertions(+), 1162 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 277755851d0..2363abb10ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ * [ENHANCEMENT] Ingester: do not reuse labels, samples and histograms slices in the write request if there are more entries than 10x the 
pre-allocated size. This should help to reduce the in-use memory in case of few requests with a very large number of labels, samples or histograms. #10040 * [ENHANCEMENT] Query-Frontend: prune ` and on() (vector(x)==y)` style queries and stop pruning ` < -Inf`. Triggered by https://github.com/prometheus/prometheus/pull/15245. #10026 * [ENHANCEMENT] Query-Frontend: perform request format validation before processing the request. #10093 +* [ENHANCEMENT] Distributor: OTLP receiver now converts also metric metadata. See also https://github.com/prometheus/prometheus/pull/15416. #10168 * [BUGFIX] Fix issue where functions such as `rate()` over native histograms could return incorrect values if a float stale marker was present in the selected range. #9508 * [BUGFIX] Fix issue where negation of native histograms (eg. `-some_native_histogram_series`) did nothing. #9508 * [BUGFIX] Fix issue where `metric might not be a counter, name does not end in _total/_sum/_count/_bucket` annotation would be emitted even if `rate` or `increase` did not have enough samples to compute a result. #9508 diff --git a/go.mod b/go.mod index b2b9913cc67..0dbf661bc7b 100644 --- a/go.mod +++ b/go.mod @@ -143,8 +143,8 @@ require ( gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect - k8s.io/apimachinery v0.31.1 // indirect - k8s.io/client-go v0.31.1 // indirect + k8s.io/apimachinery v0.31.2 // indirect + k8s.io/client-go v0.31.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect ) @@ -285,7 +285,7 @@ require ( ) // Using a fork of Prometheus with Mimir-specific changes. -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241205085346-9acc41d486c3 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241209002314-aa96f2e80ba9 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index d6c75b11a46..8224c0fff5c 100644 --- a/go.sum +++ b/go.sum @@ -1279,8 +1279,8 @@ github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzO github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20241205085346-9acc41d486c3 h1:zfF3LBLsR/NsaIzSHU+inDjjXV39fmxuZWWQQH1DQTQ= -github.com/grafana/mimir-prometheus v0.0.0-20241205085346-9acc41d486c3/go.mod h1:WKcF75sR/zMmNA/Ow3IxuBKC/gKOzGr63UkiAiaHr7I= +github.com/grafana/mimir-prometheus v0.0.0-20241209002314-aa96f2e80ba9 h1:I4jdDM9/WraFtSj7+MG3xXQepcDN3rfwWYARvGaMGRY= +github.com/grafana/mimir-prometheus v0.0.0-20241209002314-aa96f2e80ba9/go.mod h1:gB6t9u6kr6bv2nBVvVLqxWNeQWzQEGcW0iLgZY+2eyg= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw= @@ -2584,12 +2584,12 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= diff --git a/pkg/distributor/otlp/metrics_to_prw_generated.go b/pkg/distributor/otlp/metrics_to_prw_generated.go index 4aeafeb11ce..4b619c8a5d8 100644 --- a/pkg/distributor/otlp/metrics_to_prw_generated.go +++ b/pkg/distributor/otlp/metrics_to_prw_generated.go @@ -61,6 +61,7 @@ type MimirConverter struct { unique map[uint64]*mimirpb.TimeSeries conflicts map[uint64][]*mimirpb.TimeSeries everyN everyNTimes + metadata []mimirpb.MetricMetadata } func NewMimirConverter() *MimirConverter { @@ -74,6 +75,16 @@ func NewMimirConverter() *MimirConverter { func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() + + numMetrics := 0 + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + numMetrics += scopeMetricsSlice.At(j).Metrics().Len() + } + } + c.metadata = make([]mimirpb.MetricMetadata, 0, numMetrics) + for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() @@ -100,6 +111,12 @@ func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, se } promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + c.metadata = append(c.metadata, mimirpb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: promName, + Help: metric.Description(), + Unit: metric.Unit(), + }) // handle individual metrics based on type //exhaustive:enforce diff --git a/pkg/distributor/otlp/otlp_to_openmetrics_metadata_generated.go b/pkg/distributor/otlp/otlp_to_openmetrics_metadata_generated.go index 2c46a2ecf56..0717c88c58e 100644 --- a/pkg/distributor/otlp/otlp_to_openmetrics_metadata_generated.go +++ b/pkg/distributor/otlp/otlp_to_openmetrics_metadata_generated.go @@ -21,8 +21,6 @@ package otlp 
import ( "go.opentelemetry.io/collector/pdata/pmetric" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" - "github.com/grafana/mimir/pkg/mimirpb" ) @@ -45,36 +43,3 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) mimirpb.MetricMet } return mimirpb.UNKNOWN } - -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*mimirpb.MetricMetadata { - resourceMetricsSlice := md.ResourceMetrics() - - metadataLength := 0 - for i := 0; i < resourceMetricsSlice.Len(); i++ { - scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { - metadataLength += scopeMetricsSlice.At(j).Metrics().Len() - } - } - - var metadata = make([]*mimirpb.MetricMetadata, 0, metadataLength) - for i := 0; i < resourceMetricsSlice.Len(); i++ { - resourceMetrics := resourceMetricsSlice.At(i) - scopeMetricsSlice := resourceMetrics.ScopeMetrics() - - for j := 0; j < scopeMetricsSlice.Len(); j++ { - scopeMetrics := scopeMetricsSlice.At(j) - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { - metric := scopeMetrics.Metrics().At(k) - entry := mimirpb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), - Help: metric.Description(), - } - metadata = append(metadata, &entry) - } - } - } - - return metadata -} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 4ae60217a4b..66f35dd6365 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1847,7 +1847,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr Metric: make([]*mimirpb.Metric, 0), } - mergedSet := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + mergedSet := storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) for mergedSet.Next() { // Interrupt if the context has been canceled. if ctx.Err() != nil { diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index a6f17182594..1871cc5e8e0 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -488,7 +488,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec } return series.NewSeriesSetWithWarnings( - storage.NewMergeSeriesSet(resSeriesSets, storage.ChainedSeriesMerge), + storage.NewMergeSeriesSet(resSeriesSets, 0, storage.ChainedSeriesMerge), resWarnings) } diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 3a3c4b315aa..2fa028dd914 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -200,7 +200,7 @@ func (q *distributorQuerier) streamingSelect(ctx context.Context, minT, maxT int return sets[0] } // Sets need to be sorted. Both series.NewConcreteSeriesSetFromUnsortedSeries and newTimeSeriesSeriesSet take care of that. 
- return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + return storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } func (q *distributorQuerier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index cae5138ff86..92640fd2b83 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -550,7 +550,7 @@ func (mq multiQuerier) mergeSeriesSets(sets []storage.SeriesSet) storage.SeriesS } if len(chunks) == 0 { - return storage.NewMergeSeriesSet(otherSets, storage.ChainedSeriesMerge) + return storage.NewMergeSeriesSet(otherSets, 0, storage.ChainedSeriesMerge) } // partitionChunks returns set with sorted series, so it can be used by NewMergeSeriesSet @@ -561,7 +561,7 @@ func (mq multiQuerier) mergeSeriesSets(sets []storage.SeriesSet) storage.SeriesS } otherSets = append(otherSets, chunksSet) - return storage.NewMergeSeriesSet(otherSets, storage.ChainedSeriesMerge) + return storage.NewMergeSeriesSet(otherSets, 0, storage.ChainedSeriesMerge) } type sliceSeriesSet struct { diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index c01da5e2871..c5e6a664d20 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -378,7 +378,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora return storage.ErrSeriesSet(err) } - return storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge) + return storage.NewMergeSeriesSet(seriesSets, 0, storage.ChainedSeriesMerge) } type addLabelsSeriesSet struct { diff --git a/pkg/streamingpromql/testdata/ours/binary_operators.test b/pkg/streamingpromql/testdata/ours/binary_operators.test index 7a0bcb69d05..72f3f130d1a 100644 --- a/pkg/streamingpromql/testdata/ours/binary_operators.test +++ b/pkg/streamingpromql/testdata/ours/binary_operators.test @@ -276,16 +276,16 @@ load 1m another_mixed{job="test"} 10 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} {{schema:0 sum:12 count:6 buckets:[2 4 6]}} 4 5 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} # @0 @5m @10m @15m @20m @25m -eval range from 0 to 5m step 1m mixed_metric + another_mixed +eval_info range from 0 to 5m step 1m mixed_metric + another_mixed {job="test"} 20 11 12 7 _ {{schema:0 sum:24 count:12 buckets:[4 8 12]}} -eval range from 0 to 5m step 1m mixed_metric - another_mixed +eval_info range from 0 to 5m step 1m mixed_metric - another_mixed {job="test"} 0 -9 -8 -1 _ {{schema:0 sum:0 count:0}} -eval range from 0 to 5m step 1m mixed_metric * another_mixed +eval_info range from 0 to 5m step 1m mixed_metric * another_mixed {job="test"} 100 10 20 12 {{schema:0 sum:30 count:15 buckets:[5 10 15]}} _ -eval range from 0 to 5m step 1m mixed_metric / another_mixed +eval_info range from 0 to 5m step 1m mixed_metric / another_mixed {job="test"} 1 0.1 0.2 0.75 {{schema:0 sum:1.2 count:0.6 buckets:[0.2 0.4 0.6]}} _ clear @@ -317,18 +317,18 @@ eval range from 0m to 24m step 6m my_metric / 2 # Scalar on left side # Note that positive scalar / histogram == nothing. -eval range from 0m to 24m step 6m 2 / my_metric +eval_info range from 0m to 24m step 6m 2 / my_metric {histograms="both"} 0.4 0.2 _ {job="foo"} 2 1 0.6666666666666667 0.5 0.4 {job="bar"} 0.2 0.1 0.06666666666666667 _ 0.04 # Test other arithmetic operations. 
-eval range from 0m to 24m step 6m my_metric + 2 +eval_info range from 0m to 24m step 6m my_metric + 2 {histograms="both"} 7 12 _ {job="foo"} 3 4 5 6 7 {job="bar"} 12 22 32 _ 52 -eval range from 0m to 24m step 6m my_metric - 2 +eval_info range from 0m to 24m step 6m my_metric - 2 {histograms="both"} 3 8 {job="foo"} -1 0 1 2 3 {job="bar"} 8 18 28 _ 48 @@ -552,7 +552,7 @@ eval_info range from 0 to 24m step 6m left_histograms == 0 eval_info range from 0 to 24m step 6m left_histograms != 3 # No results. -eval range from 0 to 24m step 6m left_histograms != 0 +eval_info range from 0 to 24m step 6m left_histograms != 0 # No results. eval_info range from 0 to 24m step 6m left_histograms > 3 @@ -561,7 +561,7 @@ eval_info range from 0 to 24m step 6m left_histograms > 3 eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= 3 +eval_info range from 0 to 24m step 6m left_histograms >= 3 # No results. eval_info range from 0 to 24m step 6m left_histograms >= 0 @@ -576,7 +576,7 @@ eval_info range from 0 to 24m step 6m left_histograms < 0 eval_info range from 0 to 24m step 6m left_histograms <= 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= 0 +eval_info range from 0 to 24m step 6m left_histograms <= 0 # No results. eval_info range from 0 to 24m step 6m left_histograms == bool 3 @@ -649,40 +649,40 @@ eval range from 0 to 60m step 6m NaN == left_floats eval range from 0 to 60m step 6m NaN == bool left_floats {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m 3 == left_histograms +eval_info range from 0 to 24m step 6m 3 == left_histograms # No results. -eval range from 0 to 24m step 6m 0 == left_histograms +eval_info range from 0 to 24m step 6m 0 == left_histograms # No results. -eval range from 0 to 24m step 6m 3 != left_histograms +eval_info range from 0 to 24m step 6m 3 != left_histograms # No results. -eval range from 0 to 24m step 6m 0 != left_histograms +eval_info range from 0 to 24m step 6m 0 != left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 > left_histograms +eval_info range from 0 to 24m step 6m 3 > left_histograms # No results. -eval range from 0 to 24m step 6m 0 > left_histograms +eval_info range from 0 to 24m step 6m 0 > left_histograms # No results. -eval range from 0 to 24m step 6m 3 >= left_histograms +eval_info range from 0 to 24m step 6m 3 >= left_histograms # No results. -eval range from 0 to 24m step 6m 0 >= left_histograms +eval_info range from 0 to 24m step 6m 0 >= left_histograms # No results. # Scalar / scalar combinations diff --git a/pkg/streamingpromql/testdata/ours/functions.test b/pkg/streamingpromql/testdata/ours/functions.test index 95dc4f0a373..b6936bc23c3 100644 --- a/pkg/streamingpromql/testdata/ours/functions.test +++ b/pkg/streamingpromql/testdata/ours/functions.test @@ -4,22 +4,22 @@ # These test cases cover scenarios not covered by the upstream test cases, such as range queries, or edge cases that are uniquely likely to cause issues in the streaming engine. 
load 1m - some_metric{env="prod", cluster="eu"} 0+60x4 - some_metric{env="prod", cluster="us"} 0+120x4 - some_metric{env="test", cluster="eu"} 0+180x4 - some_metric{env="test", cluster="us"} 0+240x4 - some_metric_with_gaps 0 60 120 180 240 _ 2000 2120 2240 - some_metric_with_stale_marker 0 60 120 stale 240 300 + some_metric_count{env="prod", cluster="eu"} 0+60x4 + some_metric_count{env="prod", cluster="us"} 0+120x4 + some_metric_count{env="test", cluster="eu"} 0+180x4 + some_metric_count{env="test", cluster="us"} 0+240x4 + some_metric_with_gaps_total 0 60 120 180 240 _ 2000 2120 2240 + some_metric_with_stale_marker_sum 0 60 120 stale 240 300 # Range query with rate. -eval range from 0 to 4m step 1m rate(some_metric[1m1s]) +eval range from 0 to 4m step 1m rate(some_metric_count[1m1s]) {env="prod", cluster="eu"} _ 0.9836065573770493 1 1 1 {env="prod", cluster="us"} _ 1.9672131147540985 2 2 2 {env="test", cluster="eu"} _ 2.9508196721311477 3 3 3 {env="test", cluster="us"} _ 3.934426229508197 4 4 4 # Range query with increase. -eval range from 0 to 4m step 1m increase(some_metric[1m1s]) +eval range from 0 to 4m step 1m increase(some_metric_count[1m1s]) {env="prod", cluster="eu"} _ 60 61 61 61 {env="prod", cluster="us"} _ 120 122 122 122 {env="test", cluster="eu"} _ 180 183 183 183 @@ -40,102 +40,102 @@ eval range from 0 to 4m step 1m increase(some_nonexistent_metric[1m]) # # The first query below (with 1m) tests that we correctly skip evaluating rate() when there aren't enough points in the range. # The second query below (with 2m) tests that we correctly pick the last point from the buffer if the last point in the buffer is outside the range. -eval range from 0 to 8m step 1m rate(some_metric_with_gaps[1m1s]) +eval range from 0 to 8m step 1m rate(some_metric_with_gaps_total[1m1s]) {} _ 0.9836065573770493 1 1 1 _ _ 2 2 -eval range from 0 to 8m step 1m increase(some_metric_with_gaps[1m1s]) +eval range from 0 to 8m step 1m increase(some_metric_with_gaps_total[1m1s]) {} _ 60 61 61 61 _ _ 122 122 -eval range from 0 to 8m step 1m rate(some_metric_with_gaps[2m1s]) +eval range from 0 to 8m step 1m rate(some_metric_with_gaps_total[2m1s]) {} _ 0.49586776859504134 0.9917355371900827 1 1 1 14.666666666666666 2 2 -eval range from 0 to 8m step 1m increase(some_metric_with_gaps[2m1s]) +eval range from 0 to 8m step 1m increase(some_metric_with_gaps_total[2m1s]) {} _ 60 120 121 121 121 1774.6666666666665 242 242 # Test that we handle staleness markers correctly. 
-eval range from 0 to 5m step 1m rate(some_metric_with_stale_marker[2m1s]) +eval range from 0 to 5m step 1m rate(some_metric_with_stale_marker_sum[2m1s]) {} _ 0.49586776859504134 0.9917355371900827 1 1 1 -eval range from 0 to 5m step 1m increase(some_metric_with_stale_marker[2m1s]) +eval range from 0 to 5m step 1m increase(some_metric_with_stale_marker_sum[2m1s]) {} _ 60 120 121 121 121 clear # Test simple functions not covered by the upstream tests load 1m - some_metric{env="prod"} 0 0.5 -0.5 NaN -NaN 2.1 -2.1 + some_metric_count{env="prod"} 0 0.5 -0.5 NaN -NaN 2.1 -2.1 -eval range from 0 to 4m step 1m abs(some_metric) +eval range from 0 to 4m step 1m abs(some_metric_count) {env="prod"} 0 0.5 0.5 NaN NaN -eval range from 0 to 4m step 1m acos(some_metric) +eval range from 0 to 4m step 1m acos(some_metric_count) {env="prod"} 1.5707963267948966 1.0471975511965976 2.0943951023931957 NaN NaN -eval range from 0 to 4m step 1m asin(some_metric) +eval range from 0 to 4m step 1m asin(some_metric_count) {env="prod"} 0 0.5235987755982989 -0.5235987755982989 NaN NaN -eval range from 0 to 4m step 1m atanh(some_metric) +eval range from 0 to 4m step 1m atanh(some_metric_count) {env="prod"} 0 0.5493061443340548 -0.5493061443340548 NaN NaN -eval range from 0 to 6m step 1m ceil(some_metric) +eval range from 0 to 6m step 1m ceil(some_metric_count) {env="prod"} 0 1 -0 NaN -NaN 3 -2 -eval range from 0 to 6m step 1m floor(some_metric) +eval range from 0 to 6m step 1m floor(some_metric_count) {env="prod"} 0 0 -1 NaN -NaN 2 -3 clear load 1m - some_metric{foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} + some_metric_count{foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} some_nhcb_metric{baz="bar"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} some_inf_and_nan_metric{foo="baz"} 0 1 2 3 Inf Inf Inf NaN NaN NaN NaN 8 7 6 -eval range from 0 to 7m step 1m count_over_time(some_metric[3m1s]) +eval range from 0 to 7m step 1m count_over_time(some_metric_count[3m1s]) {foo="bar"} 1 2 3 4 3 2 2 2 -eval range from 0 to 7m step 1m count_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m count_over_time(some_metric_count[6s]) {foo="bar"} 1 1 1 1 _ _ 1 1 -eval range from 0 to 7m step 1m last_over_time(some_metric[3m1s]) - some_metric{foo="bar"} 0 1 2 3 3 3 {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} +eval range from 0 to 7m step 1m last_over_time(some_metric_count[3m1s]) + some_metric_count{foo="bar"} 0 1 2 3 3 3 {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m last_over_time(some_metric[6s]) - some_metric{foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} +eval range from 0 to 7m step 1m last_over_time(some_metric_count[6s]) + some_metric_count{foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m present_over_time(some_metric[3m1s]) +eval range from 0 to 7m step 1m present_over_time(some_metric_count[3m1s]) {foo="bar"} 1 1 1 1 1 1 1 1 -eval range from 0 to 7m step 1m present_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m present_over_time(some_metric_count[6s]) {foo="bar"} 1 1 1 1 _ _ 1 1 
-eval range from 0 to 7m step 1m min_over_time(some_metric[3m1s]) +eval range from 0 to 7m step 1m min_over_time(some_metric_count[3m1s]) {foo="bar"} 0 0 0 0 1 2 3 _ -eval range from 0 to 7m step 1m min_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m min_over_time(some_metric_count[6s]) {foo="bar"} 0 1 2 3 _ _ _ _ eval range from 0 to 16m step 1m min_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 0 0 0 1 2 3 Inf Inf Inf NaN 8 7 6 6 6 6 -eval range from 0 to 7m step 1m max_over_time(some_metric[3m1s]) +eval range from 0 to 7m step 1m max_over_time(some_metric_count[3m1s]) {foo="bar"} 0 1 2 3 3 3 3 _ -eval range from 0 to 7m step 1m max_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m max_over_time(some_metric_count[6s]) {foo="bar"} 0 1 2 3 _ _ _ _ eval range from 0 to 16m step 1m max_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 1 2 3 Inf Inf Inf Inf Inf Inf NaN 8 8 8 8 7 6 -eval_warn range from 0 to 10m step 1m sum_over_time(some_metric[3m1s]) +eval_warn range from 0 to 10m step 1m sum_over_time(some_metric_count[3m1s]) {foo="bar"} 0 1 3 6 6 5 _ {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 5m step 1m sum_over_time(some_metric[3m1s]) +eval range from 0 to 5m step 1m sum_over_time(some_metric_count[3m1s]) {foo="bar"} 0 1 3 6 6 5 -eval range from 7m to 10m step 1m sum_over_time(some_metric[3m1s]) +eval range from 7m to 10m step 1m sum_over_time(some_metric_count[3m1s]) {foo="bar"} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m sum_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m sum_over_time(some_metric_count[6s]) {foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} eval range from 0 to 2m step 1m sum_over_time(some_nhcb_metric[3m1s]) @@ -144,16 +144,16 @@ eval range from 0 to 2m step 1m sum_over_time(some_nhcb_metric[3m1s]) eval range from 0 to 16m step 1m sum_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 1 3 6 Inf Inf Inf NaN NaN NaN NaN NaN NaN NaN 21 13 6 -eval_warn range from 0 to 10m step 1m avg_over_time(some_metric[3m1s]) +eval_warn range from 0 to 10m step 1m avg_over_time(some_metric_count[3m1s]) {foo="bar"} 0 0.5 1 1.5 2 2.5 _ {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 5m step 1m avg_over_time(some_metric[3m1s]) +eval range from 0 to 5m step 1m avg_over_time(some_metric_count[3m1s]) {foo="bar"} 0 0.5 1 1.5 2 2.5 -eval range from 7m to 10m step 1m avg_over_time(some_metric[3m1s]) +eval range from 7m to 10m step 1m avg_over_time(some_metric_count[3m1s]) {foo="bar"} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m avg_over_time(some_metric[6s]) +eval range from 0 to 7m step 1m avg_over_time(some_metric_count[6s]) {foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} eval range from 0 to 2m step 1m avg_over_time(some_nhcb_metric[3m1s]) @@ -558,12 +558,12 
@@ eval range from 0 to 20m step 1m deriv(metric[3m1s]) clear load 1m - some_metric{env="prod", cluster="eu"} _ _ _ 0+1x4 - some_metric{env="prod", cluster="us"} _ _ _ 0+2x4 - some_metric{env="prod", cluster="au"} _ _ _ {{count:5}}+{{count:5}}x4 + some_metric_count{env="prod", cluster="eu"} _ _ _ 0+1x4 + some_metric_count{env="prod", cluster="us"} _ _ _ 0+2x4 + some_metric_count{env="prod", cluster="au"} _ _ _ {{count:5}}+{{count:5}}x4 # Function over range vector with many steps at beginning of range with no samples. -eval range from 0 to 7m step 1m last_over_time(some_metric[3m]) - some_metric{env="prod", cluster="eu"} _ _ _ 0 1 2 3 4 - some_metric{env="prod", cluster="us"} _ _ _ 0 2 4 6 8 - some_metric{env="prod", cluster="au"} _ _ _ {{count:5}} {{count:10}} {{count:15}} {{count:20}} {{count:25}} +eval range from 0 to 7m step 1m last_over_time(some_metric_count[3m]) + some_metric_count{env="prod", cluster="eu"} _ _ _ 0 1 2 3 4 + some_metric_count{env="prod", cluster="us"} _ _ _ 0 2 4 6 8 + some_metric_count{env="prod", cluster="au"} _ _ _ {{count:5}} {{count:10}} {{count:15}} {{count:20}} {{count:25}} diff --git a/pkg/streamingpromql/testdata/ours/histograms.test b/pkg/streamingpromql/testdata/ours/histograms.test index 33d89bc8bc9..788f8245e28 100644 --- a/pkg/streamingpromql/testdata/ours/histograms.test +++ b/pkg/streamingpromql/testdata/ours/histograms.test @@ -8,7 +8,7 @@ load 6m series{le="+Inf"} 8 series{le="2000"} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} -eval instant at 0m histogram_quantile(0.8, series) +eval_info instant at 0m histogram_quantile(0.8, series) {} 595 {le="2000"} 2.29739670999407 @@ -80,16 +80,16 @@ clear # Test various mixed metric scenarios load 6m series{host="a", le="0.1"} 2 _ 1 {{schema:0 sum:5 count:4 buckets:[2 2 2]}} - series{host="a", le="1"} 1 _ 2 {{schema:0 sum:5 count:4 buckets:[1 5 1]}} + series{host="a", le="1"} 3 _ 2 {{schema:0 sum:5 count:4 buckets:[1 5 1]}} series{host="a", le="10"} 5 _ 3 _ {{schema:0 sum:5 count:4 buckets:[5 2 5]}} - series{host="a", le="100"} 4 _ 9 _ {{schema:0 sum:1 count:3 buckets:[6 6 2]}} - series{host="a", le="1000"} 9 _ 5 - series{host="a", le="+Inf"} 8 _ 6 + series{host="a", le="100"} 6 _ 4 _ {{schema:0 sum:1 count:3 buckets:[6 6 2]}} + series{host="a", le="1000"} 8 _ 5 + series{host="a", le="+Inf"} 9 _ 6 series{host="a"} {{schema:0 sum:5 count:4 buckets:[9 2 1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} _ _ _ series{host="b"} 1 {{schema:0 sum:5 count:4 buckets:[0 3 1]}} {{schema:0 sum:5 count:4 buckets:[3 3 1]}} _ eval_warn range from 0m to 24m step 6m histogram_quantile(0.8, series) - {host="a"} _ 2.29739670999407 73 + {host="a"} _ 2.29739670999407 820.0000000000007 {host="a", le="0.1"} _ _ _ 3.0314331330207964 {host="a", le="1"} _ _ _ 2.29739670999407 {host="a", le="10"} _ _ _ _ 3.1166583186419996 @@ -97,7 +97,7 @@ eval_warn range from 0m to 24m step 6m histogram_quantile(0.8, series) {host="b"} _ 2.29739670999407 2.29739670999407 eval_warn range from 0m to 12m step 6m histogram_quantile(0.8, series{host="a"}) - {host="a"} _ 2.29739670999407 73 + {host="a"} _ 2.29739670999407 820.0000000000007 eval_warn range from 0m to 24m step 6m histogram_quantile(0.8, series{host="b"}) {host="b"} _ 2.29739670999407 2.29739670999407 @@ -118,7 +118,7 @@ load 6m notEnoughObservations{le="1"} 0 1 notEnoughObservations{le="+Inf"} 0 2 -eval range from 0m to 6m step 6m histogram_quantile(0.8, series) +eval_info range from 0m to 6m step 6m histogram_quantile(0.8, series) {} 4.800000000000001 NaN eval range from 0m to 6m step 6m 
histogram_quantile(0.8, noInfinity) diff --git a/pkg/streamingpromql/testdata/ours/native_histograms.test b/pkg/streamingpromql/testdata/ours/native_histograms.test index 8282fbc2e04..a9902b2a3d2 100644 --- a/pkg/streamingpromql/testdata/ours/native_histograms.test +++ b/pkg/streamingpromql/testdata/ours/native_histograms.test @@ -164,16 +164,16 @@ clear # Test mixed metrics and range query load 1m - incr_histogram 1 2 {{schema:3 sum:4 count:4 buckets:[1 2 1]}}+{{schema:5 sum:2 count:1 buckets:[1] offset:1}}x3 + incr_histogram_sum 1 2 {{schema:3 sum:4 count:4 buckets:[1 2 1]}}+{{schema:5 sum:2 count:1 buckets:[1] offset:1}}x3 # - The first value isn't enough to calculate a rate/increase # - The second value is a rate/increase from two floats # - The third value is a rate/increase across a float and histogram (so no value returned) # - The remaining values contain the rate/increase across two histograms in the vector -eval_warn range from 0 to 4m step 1m rate(incr_histogram[1m1s]) +eval_warn range from 0 to 4m step 1m rate(incr_histogram_sum[1m1s]) {} _ 0.016666666666666666 _ {{schema:3 count:0.016666666666666666 sum:0.03333333333333333 offset:1 buckets:[0.016666666666666666]}} {{schema:3 count:0.016666666666666666 sum:0.03333333333333333 offset:1 buckets:[0.016666666666666666]}} -eval_warn range from 0 to 4m step 1m increase(incr_histogram[1m1s]) +eval_warn range from 0 to 4m step 1m increase(incr_histogram_sum[1m1s]) {} _ 1.0166666666666666 _ {{schema:3 count:1.0166666666666666 sum:2.033333333333333 offset:1 buckets:[1.0166666666666666]}} {{schema:3 count:1.0166666666666666 sum:2.033333333333333 offset:1 buckets:[1.0166666666666666]}} clear diff --git a/pkg/streamingpromql/testdata/upstream/aggregators.test b/pkg/streamingpromql/testdata/upstream/aggregators.test index 39434a9797e..61a666fe10e 100644 --- a/pkg/streamingpromql/testdata/upstream/aggregators.test +++ b/pkg/streamingpromql/testdata/upstream/aggregators.test @@ -369,20 +369,22 @@ load 5m version{job="app-server", instance="1", group="production"} 6 version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 + version{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:20 z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} + version{job="app-server", instance="3", group="canary"} {{schema:0 sum:10 count:20 z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} # Unsupported by streaming engine. # eval instant at 1m count_values("version", version) # {version="6"} 5 # {version="7"} 2 # {version="8"} 2 - +# {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 # Unsupported by streaming engine. # eval instant at 1m count_values(((("version"))), version) -# {version="6"} 5 -# {version="7"} 2 -# {version="8"} 2 - +# {version="6"} 5 +# {version="7"} 2 +# {version="8"} 2 +# {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 # Unsupported by streaming engine. # eval instant at 1m count_values without (instance)("version", version) @@ -390,6 +392,7 @@ load 5m # {job="api-server", group="canary", version="8"} 2 # {job="app-server", group="production", version="6"} 2 # {job="app-server", group="canary", version="7"} 2 +# {job="app-server", group="canary", version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 # Overwrite label with output. Don't do this. # Unsupported by streaming engine. 
@@ -397,6 +400,7 @@ load 5m # {job="6", group="production"} 5 # {job="8", group="canary"} 2 # {job="7", group="canary"} 2 +# {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Overwrite label with output. Don't do this. # Unsupported by streaming engine. @@ -404,7 +408,7 @@ load 5m # {job="6", group="production"} 5 # {job="8", group="canary"} 2 # {job="7", group="canary"} 2 - +# {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Tests for quantile. clear @@ -470,12 +474,14 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data{test="histogram sample",point="c"} {{schema:0 sum:0 count:0}} foo .8 eval instant at 1m group without(point)(data) {test="two samples"} 1 {test="three samples"} 1 {test="uneven samples"} 1 + {test="histogram sample"} 1 eval instant at 1m group(foo) {} 1 @@ -647,11 +653,11 @@ eval_info instant at 0m stddev({label="c"}) eval_info instant at 0m stdvar({label="c"}) -eval instant at 0m stddev by (label) (series) +eval_info instant at 0m stddev by (label) (series) {label="a"} 0 {label="b"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="a"} 0 {label="b"} 0 @@ -662,17 +668,17 @@ load 5m series{label="b"} 1 series{label="c"} 2 -eval instant at 0m stddev(series) +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) +eval_info instant at 0m stdvar(series) {} 0.25 -eval instant at 0m stddev by (label) (series) +eval_info instant at 0m stddev by (label) (series) {label="b"} 0 {label="c"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="b"} 0 {label="c"} 0 diff --git a/pkg/streamingpromql/testdata/upstream/at_modifier.test b/pkg/streamingpromql/testdata/upstream/at_modifier.test index da939526531..e2d24bb1fbe 100644 --- a/pkg/streamingpromql/testdata/upstream/at_modifier.test +++ b/pkg/streamingpromql/testdata/upstream/at_modifier.test @@ -95,7 +95,8 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 -eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") +# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`. +eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") {job="1"} 0.3 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") diff --git a/pkg/streamingpromql/testdata/upstream/functions.test b/pkg/streamingpromql/testdata/upstream/functions.test index a0bd263205f..3ce206490f3 100644 --- a/pkg/streamingpromql/testdata/upstream/functions.test +++ b/pkg/streamingpromql/testdata/upstream/functions.test @@ -88,13 +88,13 @@ clear # Tests for increase(). load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+18x5 0+18x5 - http_requests{path="/dings"} 10+10x10 - http_requests{path="/bumms"} 1+10x10 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+18x5 0+18x5 + http_requests_total{path="/dings"} 10+10x10 + http_requests_total{path="/bumms"} 1+10x10 # Tests for increase(). 
-eval instant at 50m increase(http_requests[50m]) +eval instant at 50m increase(http_requests_total[50m]) {path="/foo"} 100 {path="/bar"} 160 {path="/dings"} 100 @@ -107,7 +107,7 @@ eval instant at 50m increase(http_requests[50m]) # chosen. However, "bumms" has value 1 at t=0 and would reach 0 at # t=-30s. Here the extrapolation to t=-2m30s would reach a negative # value, and therefore the extrapolation happens only by 30s. -eval instant at 50m increase(http_requests[100m]) +eval instant at 50m increase(http_requests_total[100m]) {path="/foo"} 100 {path="/bar"} 162 {path="/dings"} 105 @@ -120,57 +120,57 @@ clear # So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2. # Prometheus assumes it missed the intermediate values 0 and 1. load 5m - http_requests{path="/foo"} 0 1 2 3 2 3 4 + http_requests_total{path="/foo"} 0 1 2 3 2 3 4 -eval instant at 30m increase(http_requests[30m]) +eval instant at 30m increase(http_requests_total[30m]) {path="/foo"} 7 clear # Tests for rate(). load 5m - testcounter_reset_middle 0+27x4 0+27x5 - testcounter_reset_end 0+10x9 0 10 + testcounter_reset_middle_total 0+27x4 0+27x5 + testcounter_reset_end_total 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). -eval instant at 50m rate(testcounter_reset_middle[50m]) +eval instant at 50m rate(testcounter_reset_middle_total[50m]) {} 0.08 # Counter resets at end of range are ignored by rate(). -eval instant at 50m rate(testcounter_reset_end[5m]) +eval instant at 50m rate(testcounter_reset_end_total[5m]) -eval instant at 50m rate(testcounter_reset_end[6m]) +eval instant at 50m rate(testcounter_reset_end_total[6m]) {} 0 clear load 5m - calculate_rate_offset{x="a"} 0+10x10 - calculate_rate_offset{x="b"} 0+20x10 - calculate_rate_window 0+80x10 + calculate_rate_offset_total{x="a"} 0+10x10 + calculate_rate_offset_total{x="b"} 0+20x10 + calculate_rate_window_total 0+80x10 # Rates should calculate per-second rates. -eval instant at 50m rate(calculate_rate_window[50m]) +eval instant at 50m rate(calculate_rate_window_total[50m]) {} 0.26666666666666666 -eval instant at 50m rate(calculate_rate_offset[10m] offset 5m) +eval instant at 50m rate(calculate_rate_offset_total[10m] offset 5m) {x="a"} 0.03333333333333333 {x="b"} 0.06666666666666667 clear load 4m - testcounter_zero_cutoff{start="0m"} 0+240x10 - testcounter_zero_cutoff{start="1m"} 60+240x10 - testcounter_zero_cutoff{start="2m"} 120+240x10 - testcounter_zero_cutoff{start="3m"} 180+240x10 - testcounter_zero_cutoff{start="4m"} 240+240x10 - testcounter_zero_cutoff{start="5m"} 300+240x10 + testcounter_zero_cutoff_total{start="0m"} 0+240x10 + testcounter_zero_cutoff_total{start="1m"} 60+240x10 + testcounter_zero_cutoff_total{start="2m"} 120+240x10 + testcounter_zero_cutoff_total{start="3m"} 180+240x10 + testcounter_zero_cutoff_total{start="4m"} 240+240x10 + testcounter_zero_cutoff_total{start="5m"} 300+240x10 # Zero cutoff for left-side extrapolation happens until we # reach half a sampling interval (2m). Beyond that, we only # extrapolate by half a sampling interval. -eval instant at 10m rate(testcounter_zero_cutoff[20m]) +eval instant at 10m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.5 {start="1m"} 0.55 {start="2m"} 0.6 @@ -179,7 +179,7 @@ eval instant at 10m rate(testcounter_zero_cutoff[20m]) {start="5m"} 0.6 # Normal half-interval cutoff for left-side extrapolation. 
-eval instant at 50m rate(testcounter_zero_cutoff[20m]) +eval instant at 50m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.6 {start="1m"} 0.6 {start="2m"} 0.6 @@ -191,17 +191,17 @@ clear # Tests for irate(). load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+10x5 0+10x5 # Unsupported by streaming engine. -# eval instant at 50m irate(http_requests[50m]) +# eval instant at 50m irate(http_requests_total[50m]) # {path="/foo"} .03333333333333333333 # {path="/bar"} .03333333333333333333 # Counter reset. # Unsupported by streaming engine. -# eval instant at 30m irate(http_requests[50m]) +# eval instant at 30m irate(http_requests_total[50m]) # {path="/foo"} .03333333333333333333 # {path="/bar"} 0 @@ -233,18 +233,18 @@ clear # Tests for deriv() and predict_linear(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + testcounter_reset_middle_total 0+10x4 0+10x5 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # deriv should return the same as rate in simple cases. -eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 # deriv should return correct result. -eval instant at 50m deriv(testcounter_reset_middle[100m]) +eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 # predict_linear should return correct result. @@ -262,37 +262,37 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) +# eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600) # {} 70 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) +# eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h) # {} 70 # intercept at t = 3000+3600 = 6600 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +# eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) # {} 76.81818181818181 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) +# eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h) # {} 76.81818181818181 # intercept at t = 600+3600 = 4200 # Unsupported by streaming engine. -# eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +# eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) # {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 # Unsupported by streaming engine. 
-# eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +# eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) # {} 89.54545454545455 -# With http_requests, there is a sample value exactly at the end of +# With http_requests_total, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear # can be emulated with deriv. # Unsupported by streaming engine. -# eval instant at 50m predict_linear(http_requests[50m], 3600) - (http_requests + deriv(http_requests[50m]) * 3600) +# eval instant at 50m predict_linear(http_requests_total[50m], 3600) - (http_requests_total + deriv(http_requests_total[50m]) * 3600) # {group="canary", instance="1", job="app-server"} 0 clear @@ -1169,19 +1169,19 @@ clear # Testdata for absent_over_time() # Unsupported by streaming engine. -# eval instant at 1m absent_over_time(http_requests[5m]) +# eval instant at 1m absent_over_time(http_requests_total[5m]) # {} 1 # Unsupported by streaming engine. -# eval instant at 1m absent_over_time(http_requests{handler="/foo"}[5m]) +# eval instant at 1m absent_over_time(http_requests_total{handler="/foo"}[5m]) # {handler="/foo"} 1 # Unsupported by streaming engine. -# eval instant at 1m absent_over_time(http_requests{handler!="/foo"}[5m]) +# eval instant at 1m absent_over_time(http_requests_total{handler!="/foo"}[5m]) # {} 1 # Unsupported by streaming engine. -# eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +# eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) # {} 1 # Unsupported by streaming engine. @@ -1189,21 +1189,21 @@ clear # {} 1 # Unsupported by streaming engine. -# eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +# eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) # {instance="127.0.0.1"} 1 load 1m - http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN # Unsupported by streaming engine. -# eval instant at 5m absent_over_time(http_requests[5m]) +# eval instant at 5m absent_over_time(http_requests_total[5m]) # Unsupported by streaming engine. -# eval instant at 5m absent_over_time(rate(http_requests[5m])[5m:1m]) +# eval instant at 5m absent_over_time(rate(http_requests_total[5m])[5m:1m]) # Unsupported by streaming engine. # eval instant at 0m absent_over_time(httpd_log_lines_total[30s]) @@ -1213,18 +1213,18 @@ load 1m # {} 1 # Unsupported by streaming engine. -# eval instant at 15m absent_over_time(http_requests[5m]) +# eval instant at 15m absent_over_time(http_requests_total[5m]) # {} 1 # Unsupported by streaming engine. -# eval instant at 15m absent_over_time(http_requests[10m]) +# eval instant at 15m absent_over_time(http_requests_total[10m]) # Unsupported by streaming engine. -# eval instant at 16m absent_over_time(http_requests[6m]) +# eval instant at 16m absent_over_time(http_requests_total[6m]) # {} 1 # Unsupported by streaming engine. 
-# eval instant at 16m absent_over_time(http_requests[16m]) +# eval instant at 16m absent_over_time(http_requests_total[16m]) # Unsupported by streaming engine. # eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) @@ -1261,30 +1261,30 @@ load 1m clear # Testdata for present_over_time() -eval instant at 1m present_over_time(http_requests[5m]) +eval instant at 1m present_over_time(http_requests_total[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler!="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler!="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) eval instant at 1m present_over_time(rate(nonexistant[5m])[5m:]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) load 1m - http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN -eval instant at 5m present_over_time(http_requests[5m]) +eval instant at 5m present_over_time(http_requests_total[5m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 5m present_over_time(rate(http_requests[5m])[5m:1m]) +eval instant at 5m present_over_time(rate(http_requests_total[5m])[5m:1m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 @@ -1293,15 +1293,15 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) -eval instant at 15m present_over_time(http_requests[5m]) +eval instant at 15m present_over_time(http_requests_total[5m]) -eval instant at 15m present_over_time(http_requests[10m]) +eval instant at 15m present_over_time(http_requests_total[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[6m]) +eval instant at 16m present_over_time(http_requests_total[6m]) -eval instant at 16m present_over_time(http_requests[16m]) +eval instant at 16m present_over_time(http_requests_total[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 diff --git a/pkg/streamingpromql/testdata/upstream/histograms.test b/pkg/streamingpromql/testdata/upstream/histograms.test index d1530926347..100c2447f21 100644 --- a/pkg/streamingpromql/testdata/upstream/histograms.test +++ b/pkg/streamingpromql/testdata/upstream/histograms.test @@ -459,14 +459,14 @@ load 5m nonmonotonic_bucket{le="1000"} 0+9x10 nonmonotonic_bucket{le="+Inf"} 0+8x10 -# Nonmonotonic buckets -eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) +# Nonmonotonic 
buckets, triggering an info annotation. +eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) {} 0.0045 -eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) {} 8.5 -eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. diff --git a/pkg/streamingpromql/testdata/upstream/name_label_dropping.test b/pkg/streamingpromql/testdata/upstream/name_label_dropping.test index 4e58d2644fe..abddee6ec27 100644 --- a/pkg/streamingpromql/testdata/upstream/name_label_dropping.test +++ b/pkg/streamingpromql/testdata/upstream/name_label_dropping.test @@ -5,97 +5,97 @@ # Test for __name__ label drop. load 5m - metric{env="1"} 0 60 120 - another_metric{env="1"} 60 120 180 + metric_total{env="1"} 0 60 120 + another_metric_total{env="1"} 60 120 180 -# Does not drop __name__ for vector selector -eval instant at 10m metric{env="1"} - metric{env="1"} 120 +# Does not drop __name__ for vector selector. +eval instant at 10m metric_total{env="1"} + metric_total{env="1"} 120 -# Drops __name__ for unary operators -eval instant at 10m -metric +# Drops __name__ for unary operators. +eval instant at 10m -metric_total {env="1"} -120 -# Drops __name__ for binary operators -eval instant at 10m metric + another_metric +# Drops __name__ for binary operators. +eval instant at 10m metric_total + another_metric_total {env="1"} 300 -# Does not drop __name__ for binary comparison operators -eval instant at 10m metric <= another_metric - metric{env="1"} 120 +# Does not drop __name__ for binary comparison operators. +eval instant at 10m metric_total <= another_metric_total + metric_total{env="1"} 120 -# Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 10m metric <= bool another_metric +# Drops __name__ for binary comparison operators with "bool" modifier. +eval instant at 10m metric_total <= bool another_metric_total {env="1"} 1 -# Drops __name__ for vector-scalar operations -eval instant at 10m metric * 2 +# Drops __name__ for vector-scalar operations. +eval instant at 10m metric_total * 2 {env="1"} 240 -# Drops __name__ for instant-vector functions -eval instant at 10m clamp(metric, 0, 100) +# Drops __name__ for instant-vector functions. +eval instant at 10m clamp(metric_total, 0, 100) {env="1"} 100 -# Drops __name__ for round function -eval instant at 10m round(metric) +# Drops __name__ for round function. +eval instant at 10m round(metric_total) {env="1"} 120 -# Drops __name__ for range-vector functions -eval instant at 10m rate(metric{env="1"}[10m]) +# Drops __name__ for range-vector functions. +eval instant at 10m rate(metric_total{env="1"}[10m]) {env="1"} 0.2 -# Does not drop __name__ for last_over_time function -eval instant at 10m last_over_time(metric{env="1"}[10m]) - metric{env="1"} 120 +# Does not drop __name__ for last_over_time function. +eval instant at 10m last_over_time(metric_total{env="1"}[10m]) + metric_total{env="1"} 120 -# Drops name for other _over_time functions -eval instant at 10m max_over_time(metric{env="1"}[10m]) +# Drops name for other _over_time functions. +eval instant at 10m max_over_time(metric_total{env="1"}[10m]) {env="1"} 120 -# Allows relabeling (to-be-dropped) __name__ via label_replace +# Allows relabeling (to-be-dropped) __name__ via label_replace. # Unsupported by streaming engine. 
# eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") -# {my_name="rate_metric", env="1"} 0.2 -# {my_name="rate_another_metric", env="1"} 0.2 +# {my_name="rate_metric_total", env="1"} 0.2 +# {my_name="rate_another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_replace +# Allows preserving __name__ via label_replace. # Unsupported by streaming engine. # eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") -# rate_metric{env="1"} 0.2 -# rate_another_metric{env="1"} 0.2 +# rate_metric_total{env="1"} 0.2 +# rate_another_metric_total{env="1"} 0.2 -# Allows relabeling (to-be-dropped) __name__ via label_join +# Allows relabeling (to-be-dropped) __name__ via label_join. # Unsupported by streaming engine. # eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") -# {my_name="metric", env="1"} 0.2 -# {my_name="another_metric", env="1"} 0.2 +# {my_name="metric_total", env="1"} 0.2 +# {my_name="another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_join +# Allows preserving __name__ via label_join. # Unsupported by streaming engine. # eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") -# metric_1{env="1"} 0.2 -# another_metric_1{env="1"} 0.2 +# metric_total_1{env="1"} 0.2 +# another_metric_total_1{env="1"} 0.2 -# Does not drop metric names fro aggregation operators -eval instant at 10m sum by (__name__, env) (metric{env="1"}) - metric{env="1"} 120 +# Does not drop metric names from aggregation operators. +eval instant at 10m sum by (__name__, env) (metric_total{env="1"}) + metric_total{env="1"} 120 -# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) +# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label). # This is an accidental side effect of delayed __name__ label dropping # Unsupported by streaming engine. # eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) -# Aggregation operators aggregate metrics with same labelset and to-be-dropped names +# Aggregation operators aggregate metrics with same labelset and to-be-dropped names. # This is an accidental side effect of delayed __name__ label dropping # Unsupported by streaming engine. # eval instant at 10m sum(rate({env="1"}[10m])) by (env) # {env="1"} 0.4 -# Aggregationk operators propagate __name__ label dropping information +# Aggregationk operators propagate __name__ label dropping information. # Unsupported by streaming engine. -# eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) -# metric{env="1"} 120 +# eval instant at 10m topk(10, sum by (__name__, env) (metric_total{env="1"})) +# metric_total{env="1"} 120 # Unsupported by streaming engine. 
-# eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +# eval instant at 10m topk(10, sum by (__name__, env) (rate(metric_total{env="1"}[10m]))) # {env="1"} 0.2 diff --git a/pkg/streamingpromql/testdata/upstream/native_histograms.test b/pkg/streamingpromql/testdata/upstream/native_histograms.test index 93b946d863a..275279cc4ea 100644 --- a/pkg/streamingpromql/testdata/upstream/native_histograms.test +++ b/pkg/streamingpromql/testdata/upstream/native_histograms.test @@ -1207,3 +1207,23 @@ eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} clear + +# Test native histograms with sub operator. +load 10m + histogram_sub_1{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_1{idx="1"} {{schema:0 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_2{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_2{idx="1"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="0"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + +eval instant at 10m histogram_sub_1{idx="0"} - ignoring(idx) histogram_sub_1{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 1 0 2 1 1 1] n_buckets:[0 1 1 0 7 0 0 0 0 5 5 2]}} + +eval instant at 10m histogram_sub_2{idx="0"} - ignoring(idx) histogram_sub_2{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 0 1 2 1 1 1] n_buckets:[0 -2 2 2 7 0 0 0 0 5 5 2]}} + +eval instant at 10m histogram_sub_3{idx="0"} - ignoring(idx) histogram_sub_3{idx="1"} + {} {{schema:0 count:-30 sum:-1111.1 z_bucket:-2 z_bucket_w:0.001 buckets:[-1 0 -1 -2 -1 -1 -1] n_buckets:[0 2 -2 -2 -7 0 0 0 0 -5 -5 -2]}} + +clear diff --git a/pkg/streamingpromql/testdata/upstream/operators.test b/pkg/streamingpromql/testdata/upstream/operators.test index 86c998a0da8..97aac16c642 100644 --- a/pkg/streamingpromql/testdata/upstream/operators.test +++ b/pkg/streamingpromql/testdata/upstream/operators.test @@ -4,14 +4,14 @@ # Provenance-includes-copyright: The Prometheus Authors load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", 
group="canary"} 0+40x10 + http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m @@ -20,21 +20,21 @@ load 5m vector_matching_b{l="x"} 0+4x25 -eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job) {job="api-server"} 996 {job="app-server"} 2596 -eval instant at 50m 2 - SUM(http_requests) BY (job) +eval instant at 50m 2 - SUM(http_requests_total) BY (job) {job="api-server"} -998 {job="app-server"} -2598 -eval instant at 50m -http_requests{job="api-server",instance="0",group="production"} +eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"} {job="api-server",instance="0",group="production"} -100 -eval instant at 50m +http_requests{job="api-server",instance="0",group="production"} - http_requests{job="api-server",instance="0",group="production"} 100 +eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"} + http_requests_total{job="api-server",instance="0",group="production"} 100 -eval instant at 50m - - - SUM(http_requests) BY (job) +eval instant at 50m - - - SUM(http_requests_total) BY (job) {job="api-server"} -1000 {job="app-server"} -2600 @@ -47,83 +47,83 @@ eval instant at 50m -2^---1*3 eval instant at 50m 2/-2^---1*3+2 -10 -eval instant at 50m -10^3 * - SUM(http_requests) BY (job) ^ -1 +eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1 {job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m 1000 / SUM(http_requests) BY (job) +eval instant at 50m 1000 / SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m SUM(http_requests) BY (job) - 2 +eval instant at 50m SUM(http_requests_total) BY (job) - 2 {job="api-server"} 998 {job="app-server"} 2598 -eval instant at 50m SUM(http_requests) BY (job) % 3 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 {job="api-server"} 1 {job="app-server"} 2 -eval instant at 50m SUM(http_requests) BY (job) % 0.3 +eval instant at 50m SUM(http_requests_total) BY (job) % 0.3 {job="api-server"} 0.1 {job="app-server"} 0.2 -eval instant at 50m SUM(http_requests) BY (job) ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) ^ 2 {job="api-server"} 1000000 {job="app-server"} 6760000 -eval instant at 50m SUM(http_requests) BY (job) % 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 ^ 2 {job="api-server"} 1 {job="app-server"} 8 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ (3 ^ 2) +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ (3 ^ 2) {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 ^ 2 {job="api-server"} 1000 {job="app-server"} 2600 -eval instant at 50m COUNT(http_requests) BY (job) ^ COUNT(http_requests) BY (job) +eval instant at 50m 
COUNT(http_requests_total) BY (job) ^ COUNT(http_requests_total) BY (job) {job="api-server"} 256 {job="app-server"} 256 -eval instant at 50m SUM(http_requests) BY (job) / 0 +eval instant at 50m SUM(http_requests_total) BY (job) / 0 {job="api-server"} +Inf {job="app-server"} +Inf -eval instant at 50m http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} +Inf -eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m -1 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} -Inf -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m (SUM((http_requests)) BY (job)) + SUM(http_requests) BY (job) +eval instant at 50m (SUM((http_requests_total)) BY (job)) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m http_requests{job="api-server", group="canary"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="1", job="api-server"} 400 +eval instant at 50m http_requests_total{job="api-server", group="canary"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 +eval instant at 50m http_requests_total{job="api-server", group="canary"} + rate(http_requests_total{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 -eval instant at 50m rate(http_requests[25m]) * 25 * 60 +eval instant at 50m rate(http_requests_total[25m]) * 25 * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -133,7 +133,7 @@ eval instant at 50m rate(http_requests[25m]) * 25 * 60 {group="production", instance="1", job="api-server"} 100 {group="production", instance="1", job="app-server"} 300 -eval instant at 50m (rate((http_requests[25m])) * 25) * 60 +eval instant at 50m (rate((http_requests_total[25m])) * 25) * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -144,53 +144,53 @@ eval instant at 50m (rate((http_requests[25m])) * 25) * 60 {group="production", instance="1", job="app-server"} 300 -eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", 
instance="0", job="app-server"} 700 +eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 -eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and on(instance, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and on(instance) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m http_requests{group="canary"} or http_requests{group="production"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # On overlap the rhs samples must be dropped. 
-eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"} +eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 {group="canary", instance="1", job="app-server"} 801 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Matching only on instance excludes everything that has instance=0/1 but includes # entries without the instance label. -eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or on(instance) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -198,7 +198,7 @@ eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_re vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or ignoring(l, group, job) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -206,81 +206,81 @@ eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, jo vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless on(job, instance) http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 -eval 
instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group, instance) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless ignoring(group) http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / ignoring(group) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 # https://github.com/prometheus/prometheus/issues/1489 -eval instant at 50m http_requests AND ON (dummy) vector(1) - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 - -eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1) - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total AND ON (dummy) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests_total AND IGNORING (group, instance, job) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", 
job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Comparisons. -eval instant at 50m SUM(http_requests) BY (job) > 1000 +eval instant at 50m SUM(http_requests_total) BY (job) > 1000 {job="app-server"} 2600 -eval instant at 50m 1000 < SUM(http_requests) BY (job) +eval instant at 50m 1000 < SUM(http_requests_total) BY (job) {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) <= 1000 +eval instant at 50m SUM(http_requests_total) BY (job) <= 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) != 1000 +eval instant at 50m SUM(http_requests_total) BY (job) != 1000 {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) == 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) == bool 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == bool 1000 {job="api-server"} 1 {job="app-server"} 0 -eval instant at 50m SUM(http_requests) BY (job) == bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 1 -eval instant at 50m SUM(http_requests) BY (job) != bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job) {job="api-server"} 0 {job="app-server"} 0 @@ -290,12 +290,12 @@ eval instant at 50m 0 == bool 1 eval instant at 50m 1 == bool 1 1 -eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 +eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", instance="0", group="production"} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. eval_info instant at 5m {job="app-server"} == 80 - http_requests{group="canary", instance="1", job="app-server"} 80 + http_requests_total{group="canary", instance="1", job="app-server"} 80 eval_info instant at 5m http_requests_histogram != 80 @@ -694,7 +694,7 @@ eval_info range from 0 to 24m step 6m left_histograms == 0 eval_info range from 0 to 24m step 6m left_histograms != 3 # No results. -eval range from 0 to 24m step 6m left_histograms != 0 +eval_info range from 0 to 24m step 6m left_histograms != 0 # No results. eval_info range from 0 to 24m step 6m left_histograms > 3 @@ -703,7 +703,7 @@ eval_info range from 0 to 24m step 6m left_histograms > 3 eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= 3 +eval_info range from 0 to 24m step 6m left_histograms >= 3 # No results. eval_info range from 0 to 24m step 6m left_histograms >= 0 @@ -718,7 +718,7 @@ eval_info range from 0 to 24m step 6m left_histograms < 0 eval_info range from 0 to 24m step 6m left_histograms <= 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= 0 +eval_info range from 0 to 24m step 6m left_histograms <= 0 # No results. 
eval_info range from 0 to 24m step 6m left_histograms == bool 3 @@ -791,40 +791,87 @@ eval range from 0 to 60m step 6m NaN == left_floats eval range from 0 to 60m step 6m NaN == bool left_floats {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m 3 == left_histograms +eval_info range from 0 to 24m step 6m 3 == left_histograms # No results. -eval range from 0 to 24m step 6m 0 == left_histograms +eval_info range from 0 to 24m step 6m 0 == left_histograms # No results. -eval range from 0 to 24m step 6m 3 != left_histograms +eval_info range from 0 to 24m step 6m 3 != left_histograms # No results. -eval range from 0 to 24m step 6m 0 != left_histograms +eval_info range from 0 to 24m step 6m 0 != left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 > left_histograms +eval_info range from 0 to 24m step 6m 3 > left_histograms # No results. -eval range from 0 to 24m step 6m 0 > left_histograms +eval_info range from 0 to 24m step 6m 0 > left_histograms # No results. -eval range from 0 to 24m step 6m 3 >= left_histograms +eval_info range from 0 to 24m step 6m 3 >= left_histograms # No results. -eval range from 0 to 24m step 6m 0 >= left_histograms +eval_info range from 0 to 24m step 6m 0 >= left_histograms # No results. clear + +# Test completely discarding or completely including series in results with "and on" +load_with_nhcb 5m + testhistogram_bucket{le="0.1", id="1"} 0+5x10 + testhistogram_bucket{le="0.2", id="1"} 0+7x10 + testhistogram_bucket{le="+Inf", id="1"} 0+12x10 + testhistogram_bucket{le="0.1", id="2"} 0+4x10 + testhistogram_bucket{le="0.2", id="2"} 0+6x10 + testhistogram_bucket{le="+Inf", id="2"} 0+11x10 + +# Include all series when "and on" with non-empty vector. +eval instant at 10m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 22.0 + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 0.0 5.0 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 0.0 7.0 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 0.0 12.0 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 0.0 4.0 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 0.0 6.0 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 0.0 11.0 22.0 + +# Exclude all series when "and on" with empty vector. +eval instant at 10m (testhistogram_bucket) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(-1) == 1) + +# Include all native histogram series when "and on" with non-empty vector. 
+eval instant at 10m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:12 buckets:[5 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:11 buckets:[4 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +# Exclude all native histogram series when "and on" with empty vector. +eval instant at 10m (testhistogram) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(-1) == 1) + +clear diff --git a/pkg/streamingpromql/testdata/upstream/selectors.test b/pkg/streamingpromql/testdata/upstream/selectors.test index 9d08a7f2590..f9aa1e14e40 100644 --- a/pkg/streamingpromql/testdata/upstream/selectors.test +++ b/pkg/streamingpromql/testdata/upstream/selectors.test @@ -4,111 +4,111 @@ # Provenance-includes-copyright: The Prometheus Authors load 10s - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests[1m]) +eval instant at 8000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[1m]) +eval instant at 18000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 3 {job="api-server", instance="1", group="production"} 3 {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 18000s rate(http_requests{group=~".*ry", instance="1"}[1m]) +eval instant at 18000s rate(http_requests_total{group=~".*ry", instance="1"}[1m]) {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests{instance!="3"}[1m] offset 10000s) +eval instant at 18000s rate(http_requests_total{instance!="3"}[1m] offset 10000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", 
group="canary"} 4 -eval instant at 4000s rate(http_requests{instance!="3"}[1m] offset -4000s) +eval instant at 4000s rate(http_requests_total{instance!="3"}[1m] offset -4000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s) +eval instant at 18000s rate(http_requests_total[40s]) - rate(http_requests_total[1m] offset 10000s) {job="api-server", instance="0", group="production"} 2 {job="api-server", instance="1", group="production"} 1 {job="api-server", instance="0", group="canary"} 5 {job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/3575 -eval instant at 0s http_requests{foo!="bar"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!="bar", job="api-server"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!~"bar", job="api-server"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!="bar"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!="bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!~"bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/7994 -eval instant at 8000s rate(http_requests{group=~"(?i:PRO).*"}[1m]) +eval 
instant at 8000s rate(http_requests_total{group=~"(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*?(?i:PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*?(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION).*?"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION).*?"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~"((?i)PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"((?i)PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~"(?i:PRODUCTION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"(?i:PRODUCTION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:C).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:C).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 @@ -138,14 +138,14 @@ load 5m label_grouping_test{a="a", b="abb"} 0+20x10 load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x10 + 
http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # Single-letter label names and values. eval instant at 50m x{y="testvalue"} @@ -153,14 +153,14 @@ eval instant at 50m x{y="testvalue"} # Basic Regex eval instant at 50m {__name__=~".+"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 x{y="testvalue"} 100 label_grouping_test{a="a", b="abb"} 200 label_grouping_test{a="aa", b="bb"} 100 @@ -169,34 +169,34 @@ eval instant at 50m {__name__=~".+"} cpu_count{instance="0", type="numa"} 300 eval instant at 50m {job=~".+-server", job!~"api-.+"} - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="app-server"} 600 - -eval instant at 50m http_requests{group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 - -eval instant at 50m http_requests{job=~".+-server",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 - -eval instant at 50m http_requests{job!~"api-.+",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - -eval instant at 50m http_requests{group="production",job=~"api-.+"} - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="1", job="api-server"} 200 - -eval instant at 50m http_requests{group="production",job="api-server"} offset 5m - http_requests{group="production", instance="0", job="api-server"} 90 - http_requests{group="production", instance="1", 
job="api-server"} 180 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests_total{group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests_total{job=~".+-server",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests_total{job!~"api-.+",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + +eval instant at 50m http_requests_total{group="production",job=~"api-.+"} + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="1", job="api-server"} 200 + +eval instant at 50m http_requests_total{group="production",job="api-server"} offset 5m + http_requests_total{group="production", instance="0", job="api-server"} 90 + http_requests_total{group="production", instance="1", job="api-server"} 180 clear diff --git a/pkg/streamingpromql/testdata/upstream/subquery.test b/pkg/streamingpromql/testdata/upstream/subquery.test index 1849a4576bb..bc543fd94a4 100644 --- a/pkg/streamingpromql/testdata/upstream/subquery.test +++ b/pkg/streamingpromql/testdata/upstream/subquery.test @@ -4,43 +4,43 @@ # Provenance-includes-copyright: The Prometheus Authors load 10s - metric 1 2 + metric_total 1 2 # Evaluation before 0s gets no sample. -eval instant at 10s sum_over_time(metric[50s:10s]) +eval instant at 10s sum_over_time(metric_total[50s:10s]) {} 3 -eval instant at 10s sum_over_time(metric[50s:5s]) +eval instant at 10s sum_over_time(metric_total[50s:5s]) {} 4 # Every evaluation yields the last value, i.e. 2 -eval instant at 5m sum_over_time(metric[50s:10s]) +eval instant at 5m sum_over_time(metric_total[50s:10s]) {} 10 -# Series becomes stale at 5m10s (5m after last sample) +# Series becomes stale at 5m10s (5m after last sample). # Hence subquery gets a single sample at 5m10s. 
-eval instant at 5m59s sum_over_time(metric[60s:10s]) +eval instant at 5m59s sum_over_time(metric_total[60s:10s]) {} 2 -eval instant at 10s rate(metric[20s:10s]) +eval instant at 10s rate(metric_total[20s:10s]) {} 0.1 -eval instant at 20s rate(metric[20s:5s]) +eval instant at 20s rate(metric_total[20s:5s]) {} 0.06666666666666667 clear load 10s - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m:10s]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m:10s]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) +eval instant at 20000s avg_over_time(rate(http_requests_total[1m])[1m:1s]) {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 {job="api-server", instance="1", group="production"} 3 @@ -49,64 +49,64 @@ eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) clear load 10s - metric1 0+1x1000 - metric2 0+2x1000 - metric3 0+3x1000 + metric1_total 0+1x1000 + metric2_total 0+2x1000 + metric3_total 0+3x1000 -eval instant at 1000s sum_over_time(metric1[30s:10s]) +eval instant at 1000s sum_over_time(metric1_total[30s:10s]) {} 297 # This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. -eval instant at 1000s sum_over_time(metric1[30s:5s]) +eval instant at 1000s sum_over_time(metric1_total[30s:5s]) {} 591 # Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 10s) {} 297 # Same result for different offsets due to step alignment. 
-eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 9s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 7s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 7s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 5s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 5s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time(metric1[30:10] offset 3) +eval instant at 1010s sum_over_time(metric1_total[30:10] offset 3) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3) {} 297 -# Nested subqueries -eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) +# Nested subqueries. +eval instant at 1000s rate(sum_over_time(metric1_total[30s:10s])[50s:10s]) {} 0.30000000000000004 -eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) +eval instant at 1000s rate(sum_over_time(metric2_total[30s:10s])[50s:10s]) {} 0.6000000000000001 - -eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) + +eval instant at 1000s rate(sum_over_time(metric3_total[30s:10s])[50s:10s]) {} 0.9 - -eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) + +eval instant at 1000s rate(sum_over_time((metric1_total+metric2_total+metric3_total)[30s:10s])[30s:10s]) {} 1.8 clear @@ -114,28 +114,28 @@ clear # Fibonacci sequence, to ensure the rate is not constant. # Additional note: using subqueries unnecessarily is unwise. 
load 7s - metric 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 4378519841510949178490918731459856482 7084593923980518516849609894969925639 
11463113765491467695340528626429782121 18547707689471986212190138521399707760 + metric_total 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 
4378519841510949178490918731459856482 7084593923980518516849609894969925639 11463113765491467695340528626429782121 18547707689471986212190138521399707760 # Extrapolated from [3@21, 144@77]: (144 - 3) / (77 - 21) -eval instant at 80s rate(metric[1m]) +eval instant at 80s rate(metric_total[1m]) {} 2.517857143 # Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) -eval instant at 80s rate(metric[1m500ms:10s]) +eval instant at 80s rate(metric_total[1m500ms:10s]) {} 2.3666666666666667 # Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 -eval instant at 80s rate(metric[1m1s:10s]) +eval instant at 80s rate(metric_total[1m1s:10s]) {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 -eval instant at 20s min_over_time(metric[10s]) +eval instant at 20s min_over_time(metric_total[10s]) {} 2 # min(2@20) -eval instant at 20s min_over_time(metric[15s:10s]) +eval instant at 20s min_over_time(metric_total[15s:10s]) {} 1 -eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) +eval instant at 20m min_over_time(rate(metric_total[5m])[20m:1m]) {} 0.12119047619047618 diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 93331cf99f2..2bec6cfabb9 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -16,6 +16,7 @@ package relabel import ( "crypto/md5" "encoding/binary" + "encoding/json" "errors" "fmt" "strconv" @@ -84,20 +85,20 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { type Config struct { // A list of labels from which values are taken and concatenated // with the configured separator in order. - SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"sourceLabels,omitempty"` // Separator is the string between concatenated values from the source labels. - Separator string `yaml:"separator,omitempty"` + Separator string `yaml:"separator,omitempty" json:"separator,omitempty"` // Regex against which the concatenation is matched. - Regex Regexp `yaml:"regex,omitempty"` + Regex Regexp `yaml:"regex,omitempty" json:"regex,omitempty"` // Modulus to take of the hash of concatenated values from the source labels. - Modulus uint64 `yaml:"modulus,omitempty"` + Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"` // TargetLabel is the label to which the resulting string is written in a replacement. // Regexp interpolation is allowed for the replace action. - TargetLabel string `yaml:"target_label,omitempty"` + TargetLabel string `yaml:"target_label,omitempty" json:"targetLabel,omitempty"` // Replacement is the regex replacement pattern to be used. - Replacement string `yaml:"replacement,omitempty"` + Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"` // Action is the action to be performed for the relabeling. - Action Action `yaml:"action,omitempty"` + Action Action `yaml:"action,omitempty" json:"action,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -207,6 +208,25 @@ func (re Regexp) MarshalYAML() (interface{}, error) { return nil, nil } +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (re *Regexp) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (re Regexp) MarshalJSON() ([]byte, error) { + return json.Marshal(re.String()) +} + // IsZero implements the yaml.IsZeroer interface. func (re Regexp) IsZero() bool { return re.Regexp == DefaultRelabelConfig.Regex.Regexp diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go index 6fe2e8e54eb..ff756965f49 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -243,7 +243,8 @@ func (p *NHCBParser) compareLabels() bool { // Different metric type. return true } - if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { + _, name := convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + if p.lastHistogramName != name { // Different metric name. return true } @@ -253,8 +254,8 @@ func (p *NHCBParser) compareLabels() bool { } // Save the label set of the classic histogram without suffix and bucket `le` label. -func (p *NHCBParser) storeClassicLabels() { - p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) +func (p *NHCBParser) storeClassicLabels(name string) { + p.lastHistogramName = name p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) p.lastHistogramExponential = false } @@ -275,25 +276,30 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { } mName := lset.Get(labels.MetricName) // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. - if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) { + suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName) + if name != string(p.bName) { return false } - switch { - case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel): + switch suffixType { + case convertnhcb.SuffixBucket: + if !lset.Has(labels.BucketLabel) { + // This should not really happen. 
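The new `json` struct tags on relabel.Config, together with the Regexp (Un)MarshalJSON methods above, mean a relabel configuration can now round-trip through encoding/json with the regex encoded as its string form. A minimal sketch of what that enables (the field values here are illustrative only, not part of this patch):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	cfg := relabel.Config{
		SourceLabels: model.LabelNames{"__name__"},
		Regex:        relabel.MustNewRegexp("(.+)_total"),
		TargetLabel:  "base_name",
		Replacement:  "$1",
		Action:       relabel.Replace,
	}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Prints camelCase keys with the regex in its string form, e.g.
	// {"sourceLabels":["__name__"],"regex":"(.+)_total","targetLabel":"base_name","replacement":"$1","action":"replace"}
	fmt.Println(string(b))
}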
+ return false + } le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) if err == nil && !math.IsNaN(le) { - p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetBucketCount(le, p.value) }) return true } - case strings.HasSuffix(mName, "_count"): - p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { + case convertnhcb.SuffixCount: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetCount(p.value) }) return true - case strings.HasSuffix(mName, "_sum"): - p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { + case convertnhcb.SuffixSum: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetSum(p.value) }) return true @@ -301,12 +307,12 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { return false } -func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, name string, updateHist func(*convertnhcb.TempHistogram)) { if p.state != stateCollecting { - p.storeClassicLabels() + p.storeClassicLabels(name) p.tempCT = p.parser.CreatedTimestamp() p.state = stateCollecting - p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, name) } p.storeExemplars() updateHist(&p.tempNHCB) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l index 9afbbbd8bd5..09106c52ced 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l @@ -69,6 +69,7 @@ S [ ] {S}#{S}\{ l.state = sExemplar; return tComment {L}({L}|{D})* return tLName +\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tQString \} l.state = sEValue; return tBraceClose = l.state = sEValue; return tEqual \"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go index c8789ef60d4..c0b2fcdb4d8 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go @@ -53,9 +53,9 @@ yystate0: case 8: // start condition: sExemplar goto yystart57 case 9: // start condition: sEValue - goto yystart62 + goto yystart65 case 10: // start condition: sETimestamp - goto yystart68 + goto yystart71 } yystate1: @@ -538,125 +538,153 @@ yystart57: switch { default: goto yyabort - case c == ',': + case c == '"': goto yystate58 + case c == ',': + goto yystate61 case c == '=': - goto yystate59 + goto yystate62 case c == '}': - goto yystate61 + goto yystate64 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto yystate63 } yystate58: c = l.next() - goto yyrule26 + switch { + default: + goto yyabort + case c == '"': + goto yystate59 + case c == '\\': + goto yystate60 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' 
|| c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate58 + } yystate59: c = l.next() - goto yyrule24 + goto yyrule23 yystate60: c = l.next() switch { default: - goto yyrule22 - case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate58 } yystate61: c = l.next() - goto yyrule23 + goto yyrule27 yystate62: c = l.next() -yystart62: + goto yyrule25 + +yystate63: + c = l.next() + switch { + default: + goto yyrule22 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate63 + } + +yystate64: + c = l.next() + goto yyrule24 + +yystate65: + c = l.next() +yystart65: switch { default: goto yyabort case c == ' ': - goto yystate63 + goto yystate66 case c == '"': - goto yystate65 + goto yystate68 } -yystate63: +yystate66: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 + goto yystate67 } -yystate64: +yystate67: c = l.next() switch { default: - goto yyrule27 + goto yyrule28 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 + goto yystate67 } -yystate65: +yystate68: c = l.next() switch { default: goto yyabort case c == '"': - goto yystate66 + goto yystate69 case c == '\\': - goto yystate67 + goto yystate70 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate65 + goto yystate68 } -yystate66: +yystate69: c = l.next() - goto yyrule25 + goto yyrule26 -yystate67: +yystate70: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate65 + goto yystate68 } -yystate68: +yystate71: c = l.next() -yystart68: +yystart71: switch { default: goto yyabort case c == ' ': - goto yystate70 + goto yystate73 case c == '\n': - goto yystate69 + goto yystate72 } -yystate69: +yystate72: c = l.next() - goto yyrule29 + goto yyrule30 -yystate70: +yystate73: c = l.next() switch { default: goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate71 + goto yystate74 } -yystate71: +yystate74: c = l.next() switch { default: - goto yyrule28 + goto yyrule29 case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': - goto yystate71 + goto yystate74 } yyrule1: // #{S} @@ -782,39 +810,45 @@ yyrule22: // {L}({L}|{D})* { return tLName } -yyrule23: // \} +yyrule23: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tQString + goto yystate0 + } +yyrule24: // \} { l.state = sEValue return tBraceClose goto yystate0 } -yyrule24: // = +yyrule25: // = { l.state = sEValue return tEqual goto yystate0 } -yyrule25: // \"(\\.|[^\\"\n])*\" +yyrule26: // \"(\\.|[^\\"\n])*\" { l.state = sExemplar return tLValue goto yystate0 } -yyrule26: // , +yyrule27: // , { return tComma } -yyrule27: // {S}[^ \n]+ +yyrule28: // {S}[^ \n]+ { l.state = sETimestamp return tValue goto yystate0 } -yyrule28: // {S}[^ \n]+ +yyrule29: // {S}[^ \n]+ { return tTimestamp } -yyrule29: // \n +yyrule30: // \n if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak @@ -859,10 +893,10 @@ yyabort: // no lexem recognized goto yystate57 } if false { - goto yystate62 + goto yystate65 } if false { - goto yystate68 + goto yystate71 } } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 3ae9c7ddfc3..16e805f3a93 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -388,7 +388,7 @@ func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName p.skipCTSeries = skipCTSeries // Do we need to set it? } -// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +// resetCTParseValues resets the parser to the state before CreatedTimestamp method was called. func (p *OpenMetricsParser) resetCTParseValues() { p.ctHashSet = 0 p.skipCTSeries = true diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 09a2005a36c..956fd4652ac 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -160,7 +160,7 @@ func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanag Namespace: namespace, Subsystem: subsystem, Name: "errors_total", - Help: "Total number of errors sending alert notifications.", + Help: "Total number of sent alerts affected by errors.", }, []string{alertmanagerLabel}, ), @@ -619,13 +619,13 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) - n.metrics.errors.WithLabelValues(url).Inc() + n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err) + n.metrics.errors.WithLabelValues(url).Add(float64(count)) } else { numSuccess.Inc() } n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) - n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts))) + n.metrics.sent.WithLabelValues(url).Add(float64(count)) wg.Done() }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index b2bba56a2f1..c741b2390da 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ 
b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -3336,7 +3336,11 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] var buf []byte for _, s := range vec { enh.resetBuilder(s.Metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + if s.H == nil { + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + } else { + enh.lb.Set(valueLabel, s.H.String()) + } metric := enh.lb.Labels() // Considering the count_values() @@ -3448,7 +3452,7 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat return nil } -// groupingKey builds and returns the grouping key for the given metric and +// generateGroupingKey builds and returns the grouping key for the given metric and // grouping labels. func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { if without { diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 009a370ebf2..4be743040d3 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -465,11 +465,7 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return vals[0].(Vector), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F - maxVal := vals[2].(Vector)[0].F +func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if maxVal < minVal { return enh.Out, nil } @@ -490,46 +486,26 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper return enh.Out, nil } +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + return clamp(vec, minVal, maxVal, enh) +} + // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) maxVal := vals[1].(Vector)[0].F - for _, el := range vec { - if el.H != nil { - // Process only float samples. - continue - } - if !enh.enableDelayedNameRemoval { - el.Metric = el.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric, - F: math.Min(maxVal, el.F), - DropName: true, - }) - } - return enh.Out, nil + return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F - for _, el := range vec { - if el.H != nil { - // Process only float samples. 
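The clamp refactor in functions.go folds funcClampMax and funcClampMin into the shared clamp helper by passing -Inf or +Inf for the missing bound. A rough standalone sketch of the equivalence it relies on (not code from this patch; the real helper emits no samples at all when max < min):

package main

import (
	"fmt"
	"math"
)

// clamp mirrors the shared helper's float path: values are limited to [lo, hi].
func clamp(v, lo, hi float64) float64 {
	return math.Max(lo, math.Min(hi, v))
}

func main() {
	fmt.Println(clamp(5, math.Inf(-1), 3)) // clamp_max(5, 3) -> 3
	fmt.Println(clamp(5, 7, math.Inf(+1))) // clamp_min(5, 7) -> 7
	fmt.Println(clamp(5, 0, 10))           // clamp(5, 0, 10)  -> 5
}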
- continue - } - if !enh.enableDelayedNameRemoval { - el.Metric = el.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric, - F: math.Max(minVal, el.F), - DropName: true, - }) - } - return enh.Out, nil + return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go index 4cbd39b403a..a4dbd64ff8d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go @@ -491,8 +491,8 @@ func newTempHistogramWrapper() tempHistogramWrapper { } } -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogram func(*convertnhcb.TempHistogram, float64)) { - m2 := convertnhcb.GetHistogramMetricBase(m, suffix) +func processClassicHistogramSeries(m labels.Labels, name string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogram func(*convertnhcb.TempHistogram, float64)) { + m2 := convertnhcb.GetHistogramMetricBase(m, name) m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] if !exists { @@ -523,21 +523,25 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { for hash, smpls := range cmd.defs { m := cmd.metrics[hash] mName := m.Get(labels.MetricName) - switch { - case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel): + suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName) + switch suffixType { + case convertnhcb.SuffixBucket: + if !m.Has(labels.BucketLabel) { + panic(fmt.Sprintf("expected bucket label in metric %s", m)) + } le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64) if err != nil || math.IsNaN(le) { continue } - processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetBucketCount(le, f) }) - case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + case convertnhcb.SuffixCount: + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetCount(f) }) - case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + case convertnhcb.SuffixSum: + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetSum(f) }) } @@ -1097,12 +1101,16 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() + switch { + case !cmd.warn && countWarnings > 0: return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { + case cmd.warn && countWarnings == 
0: return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + case !cmd.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) + case cmd.info && countInfo == 0: + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } defer q.Close() @@ -1148,13 +1156,14 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { + switch { + case !cmd.warn && countWarnings > 0: return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { + case cmd.warn && countWarnings == 0: return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - } - if cmd.info && countInfo == 0 { + case !cmd.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) + case cmd.info && countInfo == 0: return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) } err = cmd.compareResult(res.Value) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test index 19a896a6fbb..00f393a8656 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test @@ -351,37 +351,41 @@ load 5m version{job="app-server", instance="1", group="production"} 6 version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 + version{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:20 z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} + version{job="app-server", instance="3", group="canary"} {{schema:0 sum:10 count:20 z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} eval instant at 1m count_values("version", version) {version="6"} 5 {version="7"} 2 {version="8"} 2 - + {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 eval instant at 1m count_values(((("version"))), version) - {version="6"} 5 - {version="7"} 2 - {version="8"} 2 - + {version="6"} 5 + {version="7"} 2 + {version="8"} 2 + {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 eval instant at 1m count_values without (instance)("version", version) {job="api-server", group="production", version="6"} 3 {job="api-server", group="canary", version="8"} 2 {job="app-server", group="production", version="6"} 2 {job="app-server", group="canary", version="7"} 2 + {job="app-server", group="canary", version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 # Overwrite label with output. Don't do this. 
eval instant at 1m count_values without (instance)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 + {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Overwrite label with output. Don't do this. eval instant at 1m count_values by (job, group)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 - + {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Tests for quantile. clear @@ -441,12 +445,14 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data{test="histogram sample",point="c"} {{schema:0 sum:0 count:0}} foo .8 eval instant at 1m group without(point)(data) {test="two samples"} 1 {test="three samples"} 1 {test="uneven samples"} 1 + {test="histogram sample"} 1 eval instant at 1m group(foo) {} 1 @@ -618,11 +624,11 @@ eval_info instant at 0m stddev({label="c"}) eval_info instant at 0m stdvar({label="c"}) -eval instant at 0m stddev by (label) (series) +eval_info instant at 0m stddev by (label) (series) {label="a"} 0 {label="b"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="a"} 0 {label="b"} 0 @@ -633,17 +639,17 @@ load 5m series{label="b"} 1 series{label="c"} 2 -eval instant at 0m stddev(series) +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) +eval_info instant at 0m stdvar(series) {} 0.25 -eval instant at 0m stddev by (label) (series) +eval_info instant at 0m stddev by (label) (series) {label="b"} 0 {label="c"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="b"} 0 {label="c"} 0 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test index 4091f7eabf2..1ad301bdb7d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test @@ -90,7 +90,8 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 -eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") +# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`. +eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") {job="1"} 0.3 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index 23c56565f54..e4caf0e3633 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -83,13 +83,13 @@ clear # Tests for increase(). 
load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+18x5 0+18x5 - http_requests{path="/dings"} 10+10x10 - http_requests{path="/bumms"} 1+10x10 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+18x5 0+18x5 + http_requests_total{path="/dings"} 10+10x10 + http_requests_total{path="/bumms"} 1+10x10 # Tests for increase(). -eval instant at 50m increase(http_requests[50m]) +eval instant at 50m increase(http_requests_total[50m]) {path="/foo"} 100 {path="/bar"} 160 {path="/dings"} 100 @@ -102,7 +102,7 @@ eval instant at 50m increase(http_requests[50m]) # chosen. However, "bumms" has value 1 at t=0 and would reach 0 at # t=-30s. Here the extrapolation to t=-2m30s would reach a negative # value, and therefore the extrapolation happens only by 30s. -eval instant at 50m increase(http_requests[100m]) +eval instant at 50m increase(http_requests_total[100m]) {path="/foo"} 100 {path="/bar"} 162 {path="/dings"} 105 @@ -115,57 +115,57 @@ clear # So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2. # Prometheus assumes it missed the intermediate values 0 and 1. load 5m - http_requests{path="/foo"} 0 1 2 3 2 3 4 + http_requests_total{path="/foo"} 0 1 2 3 2 3 4 -eval instant at 30m increase(http_requests[30m]) +eval instant at 30m increase(http_requests_total[30m]) {path="/foo"} 7 clear # Tests for rate(). load 5m - testcounter_reset_middle 0+27x4 0+27x5 - testcounter_reset_end 0+10x9 0 10 + testcounter_reset_middle_total 0+27x4 0+27x5 + testcounter_reset_end_total 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). -eval instant at 50m rate(testcounter_reset_middle[50m]) +eval instant at 50m rate(testcounter_reset_middle_total[50m]) {} 0.08 # Counter resets at end of range are ignored by rate(). -eval instant at 50m rate(testcounter_reset_end[5m]) +eval instant at 50m rate(testcounter_reset_end_total[5m]) -eval instant at 50m rate(testcounter_reset_end[6m]) +eval instant at 50m rate(testcounter_reset_end_total[6m]) {} 0 clear load 5m - calculate_rate_offset{x="a"} 0+10x10 - calculate_rate_offset{x="b"} 0+20x10 - calculate_rate_window 0+80x10 + calculate_rate_offset_total{x="a"} 0+10x10 + calculate_rate_offset_total{x="b"} 0+20x10 + calculate_rate_window_total 0+80x10 # Rates should calculate per-second rates. -eval instant at 50m rate(calculate_rate_window[50m]) +eval instant at 50m rate(calculate_rate_window_total[50m]) {} 0.26666666666666666 -eval instant at 50m rate(calculate_rate_offset[10m] offset 5m) +eval instant at 50m rate(calculate_rate_offset_total[10m] offset 5m) {x="a"} 0.03333333333333333 {x="b"} 0.06666666666666667 clear load 4m - testcounter_zero_cutoff{start="0m"} 0+240x10 - testcounter_zero_cutoff{start="1m"} 60+240x10 - testcounter_zero_cutoff{start="2m"} 120+240x10 - testcounter_zero_cutoff{start="3m"} 180+240x10 - testcounter_zero_cutoff{start="4m"} 240+240x10 - testcounter_zero_cutoff{start="5m"} 300+240x10 + testcounter_zero_cutoff_total{start="0m"} 0+240x10 + testcounter_zero_cutoff_total{start="1m"} 60+240x10 + testcounter_zero_cutoff_total{start="2m"} 120+240x10 + testcounter_zero_cutoff_total{start="3m"} 180+240x10 + testcounter_zero_cutoff_total{start="4m"} 240+240x10 + testcounter_zero_cutoff_total{start="5m"} 300+240x10 # Zero cutoff for left-side extrapolation happens until we # reach half a sampling interval (2m). Beyond that, we only # extrapolate by half a sampling interval. 
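# As a worked example for the series starting at 1m (values taken from the load above):
# the 20m window at t=10m contains 60@0s, 300@4m and 540@8m, i.e. a slope of 1/s over 480s.
# Left extrapolation is capped at half the 4m sampling interval (120s), but the counter
# would reach zero only 60s before the first sample, so just 60s is added on the left;
# the right side gets the capped 120s. That gives (540 - 60) * (480 + 60 + 120) / 480 / 1200s = 0.55.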
-eval instant at 10m rate(testcounter_zero_cutoff[20m]) +eval instant at 10m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.5 {start="1m"} 0.55 {start="2m"} 0.6 @@ -174,7 +174,7 @@ eval instant at 10m rate(testcounter_zero_cutoff[20m]) {start="5m"} 0.6 # Normal half-interval cutoff for left-side extrapolation. -eval instant at 50m rate(testcounter_zero_cutoff[20m]) +eval instant at 50m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.6 {start="1m"} 0.6 {start="2m"} 0.6 @@ -186,15 +186,15 @@ clear # Tests for irate(). load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+10x5 0+10x5 -eval instant at 50m irate(http_requests[50m]) +eval instant at 50m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 {path="/bar"} .03333333333333333333 # Counter reset. -eval instant at 30m irate(http_requests[50m]) +eval instant at 30m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 {path="/bar"} 0 @@ -224,18 +224,18 @@ clear # Tests for deriv() and predict_linear(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + testcounter_reset_middle_total 0+10x4 0+10x5 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # deriv should return the same as rate in simple cases. -eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 # deriv should return correct result. -eval instant at 50m deriv(testcounter_reset_middle[100m]) +eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 # predict_linear should return correct result. 
@@ -252,31 +252,31 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=0: 6.818181818181818 # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) +eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600) {} 70 -eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) +eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h) {} 70 # intercept at t = 3000+3600 = 6600 -eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) +eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h) {} 76.81818181818181 # intercept at t = 600+3600 = 4200 -eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 -eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 89.54545454545455 -# With http_requests, there is a sample value exactly at the end of +# With http_requests_total, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear # can be emulated with deriv. -eval instant at 50m predict_linear(http_requests[50m], 3600) - (http_requests + deriv(http_requests[50m]) * 3600) +eval instant at 50m predict_linear(http_requests_total[50m], 3600) - (http_requests_total + deriv(http_requests_total[50m]) * 3600) {group="canary", instance="1", job="app-server"} 0 clear @@ -1073,49 +1073,49 @@ eval instant at 50m absent(rate(nonexistant[5m])) clear # Testdata for absent_over_time() -eval instant at 1m absent_over_time(http_requests[5m]) +eval instant at 1m absent_over_time(http_requests_total[5m]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo"}[5m]) {handler="/foo"} 1 -eval instant at 1m absent_over_time(http_requests{handler!="/foo"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler!="/foo"}[5m]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) {} 1 eval instant at 1m absent_over_time(rate(nonexistant[5m])[5m:]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) {instance="127.0.0.1"} 1 load 1m - http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN -eval instant at 5m 
absent_over_time(http_requests[5m]) +eval instant at 5m absent_over_time(http_requests_total[5m]) -eval instant at 5m absent_over_time(rate(http_requests[5m])[5m:1m]) +eval instant at 5m absent_over_time(rate(http_requests_total[5m])[5m:1m]) eval instant at 0m absent_over_time(httpd_log_lines_total[30s]) eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 -eval instant at 15m absent_over_time(http_requests[5m]) +eval instant at 15m absent_over_time(http_requests_total[5m]) {} 1 -eval instant at 15m absent_over_time(http_requests[10m]) +eval instant at 15m absent_over_time(http_requests_total[10m]) -eval instant at 16m absent_over_time(http_requests[6m]) +eval instant at 16m absent_over_time(http_requests_total[6m]) {} 1 -eval instant at 16m absent_over_time(http_requests[16m]) +eval instant at 16m absent_over_time(http_requests_total[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) {} 1 @@ -1143,30 +1143,30 @@ eval instant at 10m absent_over_time({job="ingress"}[4m]) clear # Testdata for present_over_time() -eval instant at 1m present_over_time(http_requests[5m]) +eval instant at 1m present_over_time(http_requests_total[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler!="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler!="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) eval instant at 1m present_over_time(rate(nonexistant[5m])[5m:]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) load 1m - http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN -eval instant at 5m present_over_time(http_requests[5m]) +eval instant at 5m present_over_time(http_requests_total[5m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 5m present_over_time(rate(http_requests[5m])[5m:1m]) +eval instant at 5m present_over_time(rate(http_requests_total[5m])[5m:1m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 @@ -1175,15 +1175,15 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) -eval instant at 15m present_over_time(http_requests[5m]) +eval instant at 15m present_over_time(http_requests_total[5m]) -eval instant at 15m present_over_time(http_requests[10m]) +eval instant at 15m present_over_time(http_requests_total[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[6m]) +eval instant at 16m 
present_over_time(http_requests_total[6m]) -eval instant at 16m present_over_time(http_requests[16m]) +eval instant at 16m present_over_time(http_requests_total[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test index 6089fd01d20..8ab23640af1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test @@ -452,14 +452,14 @@ load 5m nonmonotonic_bucket{le="1000"} 0+9x10 nonmonotonic_bucket{le="+Inf"} 0+8x10 -# Nonmonotonic buckets -eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) +# Nonmonotonic buckets, triggering an info annotation. +eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) {} 0.0045 -eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) {} 8.5 -eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test index d4a2ad257e1..9af45a73240 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test @@ -1,88 +1,88 @@ # Test for __name__ label drop. load 5m - metric{env="1"} 0 60 120 - another_metric{env="1"} 60 120 180 + metric_total{env="1"} 0 60 120 + another_metric_total{env="1"} 60 120 180 -# Does not drop __name__ for vector selector -eval instant at 10m metric{env="1"} - metric{env="1"} 120 +# Does not drop __name__ for vector selector. +eval instant at 10m metric_total{env="1"} + metric_total{env="1"} 120 -# Drops __name__ for unary operators -eval instant at 10m -metric +# Drops __name__ for unary operators. +eval instant at 10m -metric_total {env="1"} -120 -# Drops __name__ for binary operators -eval instant at 10m metric + another_metric +# Drops __name__ for binary operators. +eval instant at 10m metric_total + another_metric_total {env="1"} 300 -# Does not drop __name__ for binary comparison operators -eval instant at 10m metric <= another_metric - metric{env="1"} 120 +# Does not drop __name__ for binary comparison operators. +eval instant at 10m metric_total <= another_metric_total + metric_total{env="1"} 120 -# Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 10m metric <= bool another_metric +# Drops __name__ for binary comparison operators with "bool" modifier. +eval instant at 10m metric_total <= bool another_metric_total {env="1"} 1 -# Drops __name__ for vector-scalar operations -eval instant at 10m metric * 2 +# Drops __name__ for vector-scalar operations. +eval instant at 10m metric_total * 2 {env="1"} 240 -# Drops __name__ for instant-vector functions -eval instant at 10m clamp(metric, 0, 100) +# Drops __name__ for instant-vector functions. 
+eval instant at 10m clamp(metric_total, 0, 100) {env="1"} 100 -# Drops __name__ for round function -eval instant at 10m round(metric) +# Drops __name__ for round function. +eval instant at 10m round(metric_total) {env="1"} 120 -# Drops __name__ for range-vector functions -eval instant at 10m rate(metric{env="1"}[10m]) +# Drops __name__ for range-vector functions. +eval instant at 10m rate(metric_total{env="1"}[10m]) {env="1"} 0.2 -# Does not drop __name__ for last_over_time function -eval instant at 10m last_over_time(metric{env="1"}[10m]) - metric{env="1"} 120 +# Does not drop __name__ for last_over_time function. +eval instant at 10m last_over_time(metric_total{env="1"}[10m]) + metric_total{env="1"} 120 -# Drops name for other _over_time functions -eval instant at 10m max_over_time(metric{env="1"}[10m]) +# Drops name for other _over_time functions. +eval instant at 10m max_over_time(metric_total{env="1"}[10m]) {env="1"} 120 -# Allows relabeling (to-be-dropped) __name__ via label_replace +# Allows relabeling (to-be-dropped) __name__ via label_replace. eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") - {my_name="rate_metric", env="1"} 0.2 - {my_name="rate_another_metric", env="1"} 0.2 + {my_name="rate_metric_total", env="1"} 0.2 + {my_name="rate_another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_replace +# Allows preserving __name__ via label_replace. eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") - rate_metric{env="1"} 0.2 - rate_another_metric{env="1"} 0.2 + rate_metric_total{env="1"} 0.2 + rate_another_metric_total{env="1"} 0.2 -# Allows relabeling (to-be-dropped) __name__ via label_join +# Allows relabeling (to-be-dropped) __name__ via label_join. eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") - {my_name="metric", env="1"} 0.2 - {my_name="another_metric", env="1"} 0.2 + {my_name="metric_total", env="1"} 0.2 + {my_name="another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_join +# Allows preserving __name__ via label_join. eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") - metric_1{env="1"} 0.2 - another_metric_1{env="1"} 0.2 + metric_total_1{env="1"} 0.2 + another_metric_total_1{env="1"} 0.2 -# Does not drop metric names fro aggregation operators -eval instant at 10m sum by (__name__, env) (metric{env="1"}) - metric{env="1"} 120 +# Does not drop metric names from aggregation operators. +eval instant at 10m sum by (__name__, env) (metric_total{env="1"}) + metric_total{env="1"} 120 -# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) +# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label). # This is an accidental side effect of delayed __name__ label dropping eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) -# Aggregation operators aggregate metrics with same labelset and to-be-dropped names +# Aggregation operators aggregate metrics with same labelset and to-be-dropped names. 
# This is an accidental side effect of delayed __name__ label dropping eval instant at 10m sum(rate({env="1"}[10m])) by (env) {env="1"} 0.4 -# Aggregationk operators propagate __name__ label dropping information -eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) - metric{env="1"} 120 +# Aggregationk operators propagate __name__ label dropping information. +eval instant at 10m topk(10, sum by (__name__, env) (metric_total{env="1"})) + metric_total{env="1"} 120 -eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +eval instant at 10m topk(10, sum by (__name__, env) (rate(metric_total{env="1"}[10m]))) {env="1"} 0.2 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 0463384e2e9..6be298cf7d6 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -1186,3 +1186,23 @@ eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} clear + +# Test native histograms with sub operator. +load 10m + histogram_sub_1{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_1{idx="1"} {{schema:0 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_2{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_2{idx="1"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="0"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + +eval instant at 10m histogram_sub_1{idx="0"} - ignoring(idx) histogram_sub_1{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 1 0 2 1 1 1] n_buckets:[0 1 1 0 7 0 0 0 0 5 5 2]}} + +eval instant at 10m histogram_sub_2{idx="0"} - ignoring(idx) histogram_sub_2{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 0 1 2 1 1 1] n_buckets:[0 -2 2 2 7 0 0 0 0 5 5 2]}} + +eval instant at 10m histogram_sub_3{idx="0"} - ignoring(idx) histogram_sub_3{idx="1"} + {} {{schema:0 count:-30 sum:-1111.1 z_bucket:-2 z_bucket_w:0.001 buckets:[-1 0 -1 -2 -1 -1 -1] n_buckets:[0 2 -2 -2 -7 0 0 0 0 -5 -5 -2]}} + +clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test index 4b00831dfc8..667989ca77d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test @@ -1,12 +1,12 @@ load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - 
http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m @@ -15,21 +15,21 @@ load 5m vector_matching_b{l="x"} 0+4x25 -eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job) {job="api-server"} 996 {job="app-server"} 2596 -eval instant at 50m 2 - SUM(http_requests) BY (job) +eval instant at 50m 2 - SUM(http_requests_total) BY (job) {job="api-server"} -998 {job="app-server"} -2598 -eval instant at 50m -http_requests{job="api-server",instance="0",group="production"} +eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"} {job="api-server",instance="0",group="production"} -100 -eval instant at 50m +http_requests{job="api-server",instance="0",group="production"} - http_requests{job="api-server",instance="0",group="production"} 100 +eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"} + http_requests_total{job="api-server",instance="0",group="production"} 100 -eval instant at 50m - - - SUM(http_requests) BY (job) +eval instant at 50m - - - SUM(http_requests_total) BY (job) {job="api-server"} -1000 {job="app-server"} -2600 @@ -42,83 +42,83 @@ eval instant at 50m -2^---1*3 eval instant at 50m 2/-2^---1*3+2 -10 -eval instant at 50m -10^3 * - SUM(http_requests) BY (job) ^ -1 +eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1 {job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m 1000 / SUM(http_requests) BY (job) +eval instant at 50m 1000 / SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m SUM(http_requests) BY (job) - 2 +eval instant at 50m SUM(http_requests_total) BY (job) - 2 {job="api-server"} 998 {job="app-server"} 2598 -eval instant at 50m SUM(http_requests) BY (job) % 3 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 {job="api-server"} 1 {job="app-server"} 2 -eval instant at 50m SUM(http_requests) BY (job) % 0.3 +eval instant at 50m SUM(http_requests_total) BY (job) % 0.3 {job="api-server"} 0.1 {job="app-server"} 0.2 -eval instant at 50m SUM(http_requests) BY (job) ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) ^ 2 {job="api-server"} 1000000 {job="app-server"} 6760000 -eval instant at 50m SUM(http_requests) BY (job) % 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 ^ 2 {job="api-server"} 1 
{job="app-server"} 8 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ (3 ^ 2) +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ (3 ^ 2) {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 ^ 2 {job="api-server"} 1000 {job="app-server"} 2600 -eval instant at 50m COUNT(http_requests) BY (job) ^ COUNT(http_requests) BY (job) +eval instant at 50m COUNT(http_requests_total) BY (job) ^ COUNT(http_requests_total) BY (job) {job="api-server"} 256 {job="app-server"} 256 -eval instant at 50m SUM(http_requests) BY (job) / 0 +eval instant at 50m SUM(http_requests_total) BY (job) / 0 {job="api-server"} +Inf {job="app-server"} +Inf -eval instant at 50m http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} +Inf -eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m -1 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} -Inf -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m (SUM((http_requests)) BY (job)) + SUM(http_requests) BY (job) +eval instant at 50m (SUM((http_requests_total)) BY (job)) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m http_requests{job="api-server", group="canary"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="1", job="api-server"} 400 +eval instant at 50m http_requests_total{job="api-server", group="canary"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 +eval instant at 50m http_requests_total{job="api-server", group="canary"} + rate(http_requests_total{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 -eval instant at 50m rate(http_requests[25m]) * 25 * 60 +eval instant at 50m rate(http_requests_total[25m]) * 25 * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -128,7 +128,7 @@ eval instant at 50m rate(http_requests[25m]) * 25 * 60 {group="production", instance="1", job="api-server"} 100 
{group="production", instance="1", job="app-server"} 300 -eval instant at 50m (rate((http_requests[25m])) * 25) * 60 +eval instant at 50m (rate((http_requests_total[25m])) * 25) * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -139,53 +139,53 @@ eval instant at 50m (rate((http_requests[25m])) * 25) * 60 {group="production", instance="1", job="app-server"} 300 -eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 +eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 -eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and on(instance, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and on(instance) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m http_requests{group="canary"} or http_requests{group="production"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total{group="canary"} or http_requests_total{group="production"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + 
http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # On overlap the rhs samples must be dropped. -eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"} +eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 {group="canary", instance="1", job="app-server"} 801 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Matching only on instance excludes everything that has instance=0/1 but includes # entries without the instance label. -eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or on(instance) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -193,7 +193,7 @@ eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_re vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or ignoring(l, group, job) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -201,81 +201,81 @@ eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, jo vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless on(job, instance) http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless on(job, 
instance) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 -eval instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group, instance) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless ignoring(group) http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / ignoring(group) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 # https://github.com/prometheus/prometheus/issues/1489 -eval instant at 50m http_requests AND ON (dummy) vector(1) - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 - -eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1) - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total AND ON (dummy) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + 
http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests_total AND IGNORING (group, instance, job) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Comparisons. -eval instant at 50m SUM(http_requests) BY (job) > 1000 +eval instant at 50m SUM(http_requests_total) BY (job) > 1000 {job="app-server"} 2600 -eval instant at 50m 1000 < SUM(http_requests) BY (job) +eval instant at 50m 1000 < SUM(http_requests_total) BY (job) {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) <= 1000 +eval instant at 50m SUM(http_requests_total) BY (job) <= 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) != 1000 +eval instant at 50m SUM(http_requests_total) BY (job) != 1000 {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) == 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) == bool 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == bool 1000 {job="api-server"} 1 {job="app-server"} 0 -eval instant at 50m SUM(http_requests) BY (job) == bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 1 -eval instant at 50m SUM(http_requests) BY (job) != bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job) {job="api-server"} 0 {job="app-server"} 0 @@ -285,12 +285,12 @@ eval instant at 50m 0 == bool 1 eval instant at 50m 1 == bool 1 1 -eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 +eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", instance="0", group="production"} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. eval_info instant at 5m {job="app-server"} == 80 - http_requests{group="canary", instance="1", job="app-server"} 80 + http_requests_total{group="canary", instance="1", job="app-server"} 80 eval_info instant at 5m http_requests_histogram != 80 @@ -673,7 +673,7 @@ eval_info range from 0 to 24m step 6m left_histograms == 0 eval_info range from 0 to 24m step 6m left_histograms != 3 # No results. -eval range from 0 to 24m step 6m left_histograms != 0 +eval_info range from 0 to 24m step 6m left_histograms != 0 # No results. eval_info range from 0 to 24m step 6m left_histograms > 3 @@ -682,7 +682,7 @@ eval_info range from 0 to 24m step 6m left_histograms > 3 eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= 3 +eval_info range from 0 to 24m step 6m left_histograms >= 3 # No results. 
eval_info range from 0 to 24m step 6m left_histograms >= 0 @@ -697,7 +697,7 @@ eval_info range from 0 to 24m step 6m left_histograms < 0 eval_info range from 0 to 24m step 6m left_histograms <= 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= 0 +eval_info range from 0 to 24m step 6m left_histograms <= 0 # No results. eval_info range from 0 to 24m step 6m left_histograms == bool 3 @@ -770,40 +770,87 @@ eval range from 0 to 60m step 6m NaN == left_floats eval range from 0 to 60m step 6m NaN == bool left_floats {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m 3 == left_histograms +eval_info range from 0 to 24m step 6m 3 == left_histograms # No results. -eval range from 0 to 24m step 6m 0 == left_histograms +eval_info range from 0 to 24m step 6m 0 == left_histograms # No results. -eval range from 0 to 24m step 6m 3 != left_histograms +eval_info range from 0 to 24m step 6m 3 != left_histograms # No results. -eval range from 0 to 24m step 6m 0 != left_histograms +eval_info range from 0 to 24m step 6m 0 != left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 > left_histograms +eval_info range from 0 to 24m step 6m 3 > left_histograms # No results. -eval range from 0 to 24m step 6m 0 > left_histograms +eval_info range from 0 to 24m step 6m 0 > left_histograms # No results. -eval range from 0 to 24m step 6m 3 >= left_histograms +eval_info range from 0 to 24m step 6m 3 >= left_histograms # No results. -eval range from 0 to 24m step 6m 0 >= left_histograms +eval_info range from 0 to 24m step 6m 0 >= left_histograms # No results. clear + +# Test completely discarding or completely including series in results with "and on" +load_with_nhcb 5m + testhistogram_bucket{le="0.1", id="1"} 0+5x10 + testhistogram_bucket{le="0.2", id="1"} 0+7x10 + testhistogram_bucket{le="+Inf", id="1"} 0+12x10 + testhistogram_bucket{le="0.1", id="2"} 0+4x10 + testhistogram_bucket{le="0.2", id="2"} 0+6x10 + testhistogram_bucket{le="+Inf", id="2"} 0+11x10 + +# Include all series when "and on" with non-empty vector. +eval instant at 10m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 22.0 + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 0.0 5.0 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 0.0 7.0 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 0.0 12.0 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 0.0 4.0 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 0.0 6.0 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 0.0 11.0 22.0 + +# Exclude all series when "and on" with empty vector. 
+eval instant at 10m (testhistogram_bucket) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(-1) == 1) + +# Include all native histogram series when "and on" with non-empty vector. +eval instant at 10m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:12 buckets:[5 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:11 buckets:[4 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +# Exclude all native histogram series when "and on" with empty vector. +eval instant at 10m (testhistogram) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(-1) == 1) + +clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/selectors.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/selectors.test index 6742d83e99c..3a1f5263dd3 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/selectors.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/selectors.test @@ -1,109 +1,109 @@ load 10s - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests[1m]) +eval instant at 8000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[1m]) +eval instant at 18000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 3 {job="api-server", instance="1", group="production"} 3 {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 18000s rate(http_requests{group=~".*ry", instance="1"}[1m]) +eval instant at 18000s rate(http_requests_total{group=~".*ry", instance="1"}[1m]) {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests{instance!="3"}[1m] 
offset 10000s) +eval instant at 18000s rate(http_requests_total{instance!="3"}[1m] offset 10000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 4000s rate(http_requests{instance!="3"}[1m] offset -4000s) +eval instant at 4000s rate(http_requests_total{instance!="3"}[1m] offset -4000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s) +eval instant at 18000s rate(http_requests_total[40s]) - rate(http_requests_total[1m] offset 10000s) {job="api-server", instance="0", group="production"} 2 {job="api-server", instance="1", group="production"} 1 {job="api-server", instance="0", group="canary"} 5 {job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/3575 -eval instant at 0s http_requests{foo!="bar"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!="bar", job="api-server"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!~"bar", job="api-server"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 - -eval instant at 0s http_requests{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!="bar"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!="bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!~"bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 + +eval instant at 0s http_requests_total{foo!~"bar", job="api-server", instance="1", x!="y", 
z="", group!=""} + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/7994 -eval instant at 8000s rate(http_requests{group=~"(?i:PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*?(?i:PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*?(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION).*?"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION).*?"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~"((?i)PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"((?i)PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~"(?i:PRODUCTION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"(?i:PRODUCTION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:C).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:C).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 @@ -133,14 +133,14 @@ load 5m label_grouping_test{a="a", b="abb"} 0+20x10 load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", 
instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # Single-letter label names and values. eval instant at 50m x{y="testvalue"} @@ -148,14 +148,14 @@ eval instant at 50m x{y="testvalue"} # Basic Regex eval instant at 50m {__name__=~".+"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 x{y="testvalue"} 100 label_grouping_test{a="a", b="abb"} 200 label_grouping_test{a="aa", b="bb"} 100 @@ -164,34 +164,34 @@ eval instant at 50m {__name__=~".+"} cpu_count{instance="0", type="numa"} 300 eval instant at 50m {job=~".+-server", job!~"api-.+"} - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="app-server"} 600 - -eval instant at 50m http_requests{group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 - -eval instant at 50m http_requests{job=~".+-server",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 - -eval instant at 50m http_requests{job!~"api-.+",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - -eval instant at 50m http_requests{group="production",job=~"api-.+"} - http_requests{group="production", instance="0", job="api-server"} 100 - 
http_requests{group="production", instance="1", job="api-server"} 200 - -eval instant at 50m http_requests{group="production",job="api-server"} offset 5m - http_requests{group="production", instance="0", job="api-server"} 90 - http_requests{group="production", instance="1", job="api-server"} 180 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="app-server"} 600 + +eval instant at 50m http_requests_total{group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests_total{job=~".+-server",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 + +eval instant at 50m http_requests_total{job!~"api-.+",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + +eval instant at 50m http_requests_total{group="production",job=~"api-.+"} + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="1", job="api-server"} 200 + +eval instant at 50m http_requests_total{group="production",job="api-server"} offset 5m + http_requests_total{group="production", instance="0", job="api-server"} 90 + http_requests_total{group="production", instance="1", job="api-server"} 180 clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test index 3ac547a2b57..5a5e4e00920 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test @@ -1,41 +1,41 @@ load 10s - metric 1 2 + metric_total 1 2 # Evaluation before 0s gets no sample. -eval instant at 10s sum_over_time(metric[50s:10s]) +eval instant at 10s sum_over_time(metric_total[50s:10s]) {} 3 -eval instant at 10s sum_over_time(metric[50s:5s]) +eval instant at 10s sum_over_time(metric_total[50s:5s]) {} 4 # Every evaluation yields the last value, i.e. 2 -eval instant at 5m sum_over_time(metric[50s:10s]) +eval instant at 5m sum_over_time(metric_total[50s:10s]) {} 10 -# Series becomes stale at 5m10s (5m after last sample) +# Series becomes stale at 5m10s (5m after last sample). # Hence subquery gets a single sample at 5m10s. 
-eval instant at 5m59s sum_over_time(metric[60s:10s]) +eval instant at 5m59s sum_over_time(metric_total[60s:10s]) {} 2 -eval instant at 10s rate(metric[20s:10s]) +eval instant at 10s rate(metric_total[20s:10s]) {} 0.1 -eval instant at 20s rate(metric[20s:5s]) +eval instant at 20s rate(metric_total[20s:5s]) {} 0.06666666666666667 clear load 10s - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m:10s]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m:10s]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) +eval instant at 20000s avg_over_time(rate(http_requests_total[1m])[1m:1s]) {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 {job="api-server", instance="1", group="production"} 3 @@ -44,64 +44,64 @@ eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) clear load 10s - metric1 0+1x1000 - metric2 0+2x1000 - metric3 0+3x1000 + metric1_total 0+1x1000 + metric2_total 0+2x1000 + metric3_total 0+3x1000 -eval instant at 1000s sum_over_time(metric1[30s:10s]) +eval instant at 1000s sum_over_time(metric1_total[30s:10s]) {} 297 # This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. -eval instant at 1000s sum_over_time(metric1[30s:5s]) +eval instant at 1000s sum_over_time(metric1_total[30s:5s]) {} 591 # Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 10s) {} 297 # Same result for different offsets due to step alignment. 
-eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 9s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 7s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 7s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 5s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 5s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time(metric1[30:10] offset 3) +eval instant at 1010s sum_over_time(metric1_total[30:10] offset 3) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3) {} 297 -# Nested subqueries -eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) +# Nested subqueries. +eval instant at 1000s rate(sum_over_time(metric1_total[30s:10s])[50s:10s]) {} 0.30000000000000004 -eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) +eval instant at 1000s rate(sum_over_time(metric2_total[30s:10s])[50s:10s]) {} 0.6000000000000001 -eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) +eval instant at 1000s rate(sum_over_time(metric3_total[30s:10s])[50s:10s]) {} 0.9 -eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) +eval instant at 1000s rate(sum_over_time((metric1_total+metric2_total+metric3_total)[30s:10s])[30s:10s]) {} 1.8 clear @@ -109,28 +109,28 @@ clear # Fibonacci sequence, to ensure the rate is not constant. # Additional note: using subqueries unnecessarily is unwise. 
load 7s - metric 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 4378519841510949178490918731459856482 7084593923980518516849609894969925639 
11463113765491467695340528626429782121 18547707689471986212190138521399707760 + metric_total 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 
4378519841510949178490918731459856482 7084593923980518516849609894969925639 11463113765491467695340528626429782121 18547707689471986212190138521399707760 # Extrapolated from [3@21, 144@77]: (144 - 3) / (77 - 21) -eval instant at 80s rate(metric[1m]) +eval instant at 80s rate(metric_total[1m]) {} 2.517857143 # Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) -eval instant at 80s rate(metric[1m500ms:10s]) +eval instant at 80s rate(metric_total[1m500ms:10s]) {} 2.3666666666666667 # Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 -eval instant at 80s rate(metric[1m1s:10s]) +eval instant at 80s rate(metric_total[1m1s:10s]) {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 -eval instant at 20s min_over_time(metric[10s]) +eval instant at 20s min_over_time(metric_total[10s]) {} 2 # min(2@20) -eval instant at 20s min_over_time(metric[15s:10s]) +eval instant at 20s min_over_time(metric_total[15s:10s]) {} 1 -eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) +eval instant at 20m min_over_time(rate(metric_total[5m])[20m:1m]) {} 0.12119047619047618 diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 3144a0f648e..2f7f661adb7 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "math" - "slices" "sync" "github.com/prometheus/prometheus/model/histogram" @@ -136,13 +135,17 @@ func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { // Select returns a set of series that matches the given label matchers. func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) + var limit int + if hints != nil { + limit = hints.Limit + } if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...)) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { - s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) + s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } @@ -175,7 +178,7 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints seriesSets = append(seriesSets, r) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { - s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) + s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } @@ -193,35 +196,44 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...) + res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { + return q.LabelValues(ctx, name, hints, matchers...) + }) if err != nil { return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } return res, ws, nil } -// lvals performs merge sort for LabelValues from multiple queriers. 
-func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +// mergeResults performs merge sort on the results of invoking the resultsFn against multiple queriers. +func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *LabelHints, resultsFn func(q LabelQuerier) ([]string, annotations.Annotations, error)) ([]string, annotations.Annotations, error) { if lq.Len() == 0 { return nil, nil, nil } if lq.Len() == 1 { - return lq.Get(0).LabelValues(ctx, n, hints, matchers...) + return resultsFn(lq.Get(0)) } a, b := lq.SplitByHalf() var ws annotations.Annotations - s1, w, err := q.lvals(ctx, a, n, hints, matchers...) + s1, w, err := q.mergeResults(a, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } - s2, ws, err := q.lvals(ctx, b, n, hints, matchers...) + s2, w, err := q.mergeResults(b, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } - return mergeStrings(s1, s2), ws, nil + + s1 = truncateToLimit(s1, hints) + s2 = truncateToLimit(s2, hints) + + merged := mergeStrings(s1, s2) + merged = truncateToLimit(merged, hints) + + return merged, ws, nil } func mergeStrings(a, b []string) []string { @@ -253,33 +265,13 @@ func mergeStrings(a, b []string) []string { // LabelNames returns all the unique label names present in all queriers in sorted order. func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - var ( - labelNamesMap = make(map[string]struct{}) - warnings annotations.Annotations - ) - for _, querier := range q.queriers { - names, wrn, err := querier.LabelNames(ctx, hints, matchers...) - if wrn != nil { - // TODO(bwplotka): We could potentially wrap warnings. - warnings.Merge(wrn) - } - if err != nil { - return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) - } - for _, name := range names { - labelNamesMap[name] = struct{}{} - } - } - if len(labelNamesMap) == 0 { - return nil, warnings, nil - } - - labelNames := make([]string, 0, len(labelNamesMap)) - for name := range labelNamesMap { - labelNames = append(labelNames, name) + res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { + return q.LabelNames(ctx, hints, matchers...) + }) + if err != nil { + return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) } - slices.Sort(labelNames) - return labelNames, warnings, nil + return res, ws, nil } // Close releases the resources of the generic querier. @@ -293,17 +285,25 @@ func (q *mergeGenericQuerier) Close() error { return errs.Err() } +func truncateToLimit(s []string, hints *LabelHints) []string { + if hints != nil && hints.Limit > 0 && len(s) > hints.Limit { + s = s[:hints.Limit] + } + return s +} + // VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together. // It has to handle time-overlapped series as well. type VerticalSeriesMergeFunc func(...Series) Series // NewMergeSeriesSet returns a new SeriesSet that merges many SeriesSets together. -func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) SeriesSet { +// If limit is set, the SeriesSet will be limited up-to the limit. 0 means disabled. 
+func NewMergeSeriesSet(sets []SeriesSet, limit int, mergeFunc VerticalSeriesMergeFunc) SeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) } - return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} + return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } // VerticalChunkSeriesMergeFunc returns merged chunk series implementation that merges potentially time-overlapping @@ -313,12 +313,12 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri type VerticalChunkSeriesMergeFunc func(...ChunkSeries) ChunkSeries // NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges many SeriesSet together. -func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet { +func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, limit int, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) } - return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} + return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } // genericMergeSeriesSet implements genericSeriesSet. @@ -326,9 +326,11 @@ type genericMergeSeriesSet struct { currentLabels labels.Labels mergeFunc genericSeriesMergeFunc - heap genericSeriesSetHeap - sets []genericSeriesSet - currentSets []genericSeriesSet + heap genericSeriesSetHeap + sets []genericSeriesSet + currentSets []genericSeriesSet + seriesLimit int + mergedSeries int // tracks the total number of series merged and returned. } // newGenericMergeSeriesSet returns a new genericSeriesSet that merges (and deduplicates) @@ -336,7 +338,8 @@ type genericMergeSeriesSet struct { // Each series set must return its series in labels order, otherwise // merged series set will be incorrect. // Overlapped situations are merged using provided mergeFunc. -func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMergeFunc) genericSeriesSet { +// If seriesLimit is set, only limited series are returned. +func newGenericMergeSeriesSet(sets []genericSeriesSet, seriesLimit int, mergeFunc genericSeriesMergeFunc) genericSeriesSet { if len(sets) == 1 { return sets[0] } @@ -356,13 +359,19 @@ func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMe } } return &genericMergeSeriesSet{ - mergeFunc: mergeFunc, - sets: sets, - heap: h, + mergeFunc: mergeFunc, + sets: sets, + heap: h, + seriesLimit: seriesLimit, } } func (c *genericMergeSeriesSet) Next() bool { + if c.seriesLimit > 0 && c.mergedSeries >= c.seriesLimit { + // Exit early if seriesLimit is set. + return false + } + // Run in a loop because the "next" series sets may not be valid anymore. // If, for the current label set, all the next series sets come from // failed remote storage sources, we want to keep trying with the next label set. 
@@ -393,6 +402,7 @@ func (c *genericMergeSeriesSet) Next() bool { break } } + c.mergedSeries++ return true } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 3dde6ce6a17..e33c93ab4a3 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -58,6 +58,7 @@ type PrometheusConverter struct { unique map[uint64]*prompb.TimeSeries conflicts map[uint64][]*prompb.TimeSeries everyN everyNTimes + metadata []prompb.MetricMetadata } func NewPrometheusConverter() *PrometheusConverter { @@ -71,6 +72,16 @@ func NewPrometheusConverter() *PrometheusConverter { func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() + + numMetrics := 0 + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + numMetrics += scopeMetricsSlice.At(j).Metrics().Len() + } + } + c.metadata = make([]prompb.MetricMetadata, 0, numMetrics) + for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() @@ -97,6 +108,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + c.metadata = append(c.metadata, prompb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: promName, + Help: metric.Description(), + Unit: metric.Unit(), + }) // handle individual metrics based on type //exhaustive:enforce diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index b423d2cc6e4..359fc525220 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -20,7 +20,6 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/prompb" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType { @@ -42,36 +41,3 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta } return prompb.MetricMetadata_UNKNOWN } - -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*prompb.MetricMetadata { - resourceMetricsSlice := md.ResourceMetrics() - - metadataLength := 0 - for i := 0; i < resourceMetricsSlice.Len(); i++ { - scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { - metadataLength += scopeMetricsSlice.At(j).Metrics().Len() - } - } - - var 
metadata = make([]*prompb.MetricMetadata, 0, metadataLength) - for i := 0; i < resourceMetricsSlice.Len(); i++ { - resourceMetrics := resourceMetricsSlice.At(i) - scopeMetricsSlice := resourceMetrics.ScopeMetrics() - - for j := 0; j < scopeMetricsSlice.Len(); j++ { - scopeMetrics := scopeMetricsSlice.At(j) - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { - metric := scopeMetrics.Metrics().At(k) - entry := prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), - Help: metric.Description(), - } - metadata = append(metadata, &entry) - } - } - } - - return metadata -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go index fe973761a25..abffbe61054 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go @@ -39,3 +39,8 @@ func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries { return allTS } + +// Metadata returns a slice of the prompb.Metadata that were converted from OTel format. +func (c *PrometheusConverter) Metadata() []prompb.MetricMetadata { + return c.metadata +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index e15c01bed7a..5ccc270534c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -532,6 +532,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ Timeseries: converter.TimeSeries(), + Metadata: converter.Metadata(), }) switch { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 8042dae1962..651736ba005 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -1149,7 +1149,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if len(sets) > 1 { // Merge series using specified chunk series merger. // The default one is the compacting series merger. - set = storage.NewMergeChunkSeriesSet(sets, mergeFunc) + set = storage.NewMergeChunkSeriesSet(sets, 0, mergeFunc) } // Iterate over all sorted chunk series. 
@@ -1238,7 +1238,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM seriesSet := sets[0] if len(sets) > 1 { - seriesSet = storage.NewMergeChunkSeriesSet(sets, mergeFunc) + seriesSet = storage.NewMergeChunkSeriesSet(sets, 0, mergeFunc) } for seriesSet.Next() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 6729d770901..b1f3abd1545 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -658,32 +658,15 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch concurrency = h.opts.WALReplayConcurrency processors = make([]wblSubsetProcessor, concurrency) - dec record.Decoder shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decodedCh = make(chan interface{}, 10) - decodeErr error - samplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSample{} - }, - } - markersPool = sync.Pool{ - New: func() interface{} { - return []record.RefMmapMarker{} - }, - } - histogramSamplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefHistogramSample{} - }, - } - floatHistogramSamplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefFloatHistogramSample{} - }, - } + decodedCh = make(chan interface{}, 10) + decodeErr error + samplesPool zeropool.Pool[[]record.RefSample] + markersPool zeropool.Pool[[]record.RefMmapMarker] + histogramSamplesPool zeropool.Pool[[]record.RefHistogramSample] + floatHistogramSamplesPool zeropool.Pool[[]record.RefFloatHistogramSample] ) defer func() { @@ -713,11 +696,13 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch go func() { defer close(decodedCh) + var err error + dec := record.NewDecoder(syms) for r.Next() { rec := r.Record() switch dec.Type(rec) { case record.Samples: - samples := samplesPool.Get().([]record.RefSample)[:0] + samples := samplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -729,7 +714,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- samples case record.MmapMarkers: - markers := markersPool.Get().([]record.RefMmapMarker)[:0] + markers := markersPool.Get()[:0] markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -741,7 +726,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- markers case record.HistogramSamples: - hists := histogramSamplesPool.Get().([]record.RefHistogramSample)[:0] + hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -753,7 +738,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- hists case record.FloatHistogramSamples: - hists := floatHistogramSamplesPool.Get().([]record.RefFloatHistogramSample)[:0] + hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -804,7 +789,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - samplesPool.Put(d) + samplesPool.Put(v) case []record.RefMmapMarker: markers := v for _, rm := range markers { @@ -859,7 +844,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms 
*labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - histogramSamplesPool.Put(v) //nolint:staticcheck + histogramSamplesPool.Put(v) case []record.RefFloatHistogramSample: samples := v // We split up the samples into chunks of 5000 samples or less. @@ -891,7 +876,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - floatHistogramSamplesPool.Put(v) //nolint:staticcheck + floatHistogramSamplesPool.Put(v) default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 67c8f53bfed..b44b4089275 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -514,10 +514,14 @@ func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string e := p.m[name] its := make([]Postings, 0, len(e)) + lps := make([]ListPostings, len(e)) + i := 0 for _, refs := range e { if len(refs) > 0 { - its = append(its, NewListPostings(refs)) + lps[i] = ListPostings{list: refs} + its = append(its, &lps[i]) } + i++ } // Let the mutex go before merging. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index d68ef2accb8..89db5d2dd72 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -206,7 +206,7 @@ func (w *Watcher) Notify() { } } -func (w *Watcher) setMetrics() { +func (w *Watcher) SetMetrics() { // Setup the WAL Watchers metrics. We do this here rather than in the // constructor because of the ordering of creating Queue Managers's, // stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig. @@ -221,7 +221,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. func (w *Watcher) Start() { - w.setMetrics() + w.SetMetrics() w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() diff --git a/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go index ee5bcb72def..21ae62b3cb3 100644 --- a/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go +++ b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go @@ -139,21 +139,18 @@ func (h TempHistogram) Convert() (*histogram.Histogram, *histogram.FloatHistogra return nil, nil, h.err } + if !h.hasCount && len(h.buckets) > 0 { + // No count, so set count to the highest known bucket's count. + h.count = h.buckets[len(h.buckets)-1].count + h.hasCount = true + } + if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) { // No +Inf bucket. - if !h.hasCount && len(h.buckets) > 0 { - // No count either, so set count to the last known bucket's count. - h.count = h.buckets[len(h.buckets)-1].count - } // Let the last bucket be +Inf with the overall count. 
h.buckets = append(h.buckets, tempHistogramBucket{le: math.Inf(1), count: h.count}) } - if !h.hasCount { - h.count = h.buckets[len(h.buckets)-1].count - h.hasCount = true - } - for _, b := range h.buckets { intCount := int64(math.Round(b.count)) if b.count != float64(intCount) { @@ -232,26 +229,34 @@ func (h TempHistogram) convertToFloatHistogram() (*histogram.Histogram, *histogr return nil, rh.Compact(0), nil } -func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { - mName := m.Get(labels.MetricName) +func GetHistogramMetricBase(m labels.Labels, name string) labels.Labels { return labels.NewBuilder(m). - Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Set(labels.MetricName, name). Del(labels.BucketLabel). Labels() } +type SuffixType int + +const ( + SuffixNone SuffixType = iota + SuffixBucket + SuffixSum + SuffixCount +) + // GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from // the metric name. We specifically do not remove the _created suffix as that // should be removed by the caller. -func GetHistogramMetricBaseName(s string) string { +func GetHistogramMetricBaseName(s string) (SuffixType, string) { if r, ok := strings.CutSuffix(s, "_bucket"); ok { - return r + return SuffixBucket, r } if r, ok := strings.CutSuffix(s, "_sum"); ok { - return r + return SuffixSum, r } if r, ok := strings.CutSuffix(s, "_count"); ok { - return r + return SuffixCount, r } - return s + return SuffixNone, s } diff --git a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go index d5aee5c095c..e7dff20f786 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go @@ -26,6 +26,8 @@ const ( maxEntries = 1024 ) +var _ slog.Handler = (*Deduper)(nil) + // Deduper implements *slog.Handler, dedupes log lines based on a time duration. type Deduper struct { next *slog.Logger diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 76a8c5864e1..14b99cce9b5 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -919,7 +919,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { s := q.Select(ctx, true, hints, mset...) sets = append(sets, s) } - set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + set = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } else { // At this point at least one match exists. set = q.Select(ctx, false, hints, matcherSets[0]...) @@ -1376,7 +1376,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // RuleDiscovery has info for all rules. type RuleDiscovery struct { RuleGroups []*RuleGroup `json:"groups"` - GroupNextToken string `json:"groupNextToken:omitempty"` + GroupNextToken string `json:"groupNextToken,omitempty"` } // RuleGroup has info for rules which are part of a group. 
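For reference on the storage/merge.go, tsdb/compact.go and web/api/v1/api.go hunks above: storage.NewMergeSeriesSet and storage.NewMergeChunkSeriesSet now take a limit argument, and the callers updated in this patch pass 0 to keep the previous unlimited behaviour. A minimal Go sketch of the new call shape, not part of the patch; the mergeSets helper name is illustrative only:

package example

import "github.com/prometheus/prometheus/storage"

// mergeSets is an illustrative helper: limit is the new argument on
// NewMergeSeriesSet. 0 keeps the previous unlimited behaviour; a positive
// value makes the merged SeriesSet stop returning series once that many
// have been merged.
func mergeSets(sets []storage.SeriesSet, limit int) storage.SeriesSet {
	return storage.NewMergeSeriesSet(sets, limit, storage.ChainedSeriesMerge)
}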
diff --git a/vendor/modules.txt b/vendor/modules.txt index b052f6d52ea..5e9e8d606ce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1017,7 +1017,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241205085346-9acc41d486c3 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241209002314-aa96f2e80ba9 ## explicit; go 1.22.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1620,10 +1620,10 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 ## explicit gopkg.in/yaml.v3 -# k8s.io/apimachinery v0.31.1 +# k8s.io/apimachinery v0.31.2 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/util/runtime -# k8s.io/client-go v0.31.1 +# k8s.io/client-go v0.31.2 ## explicit; go 1.22.0 k8s.io/client-go/tools/metrics k8s.io/client-go/util/workqueue @@ -1688,7 +1688,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241205085346-9acc41d486c3 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241209002314-aa96f2e80ba9 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b
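For reference on the otlptranslator and write_handler.go hunks above: FromMetrics now records one prompb.MetricMetadata entry per OTLP metric, the converter exposes them via a new Metadata() accessor, and the OTLP write handler forwards them in the same remote-write request as the converted samples. A minimal Go sketch under those assumptions, not part of the patch; the buildWriteRequest helper and its arguments are illustrative only:

package example

import (
	"context"
	"log/slog"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildWriteRequest converts OTLP metrics and returns a remote-write request
// carrying both the time series and the newly collected metric metadata.
func buildWriteRequest(ctx context.Context, md pmetric.Metrics, settings prometheusremotewrite.Settings, logger *slog.Logger) (*prompb.WriteRequest, error) {
	converter := prometheusremotewrite.NewPrometheusConverter()
	// Annotations are ignored here for brevity; callers may want to surface them.
	if _, err := converter.FromMetrics(ctx, md, settings, logger); err != nil {
		return nil, err
	}
	return &prompb.WriteRequest{
		Timeseries: converter.TimeSeries(),
		Metadata:   converter.Metadata(), // new accessor added by this change
	}, nil
}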