From 3af22a865221e493426ddbbf841fe55bf640662a Mon Sep 17 00:00:00 2001
From: Eng Zer Jun
Date: Fri, 13 Dec 2024 21:45:58 +0800
Subject: [PATCH] Replace min/max helpers with built-in min/max (#10202)

We can use the built-in `min` and `max` functions since Go 1.21.

Reference: https://go.dev/ref/spec#Min_and_max

Signed-off-by: Eng Zer Jun
---
 pkg/continuoustest/client.go                  |  3 +--
 pkg/distributor/distributor.go                |  8 +++---
 pkg/distributor/distributor_test.go           |  3 +--
 pkg/frontend/querymiddleware/querysharding.go |  7 +++--
 pkg/frontend/querymiddleware/results_cache.go |  5 ++--
 pkg/ingester/ingester.go                      |  6 ++---
 .../ingester_early_compaction_test.go         |  3 +--
 pkg/ingester/ingester_test.go                 |  3 +--
 pkg/ingester/limiter.go                       |  5 ++--
 pkg/ingester/user_tsdb.go                     |  2 +-
 pkg/querier/cardinality_analysis_handler.go   |  3 +--
 pkg/querier/querier.go                        |  5 ++--
 pkg/querier/worker/worker.go                  |  3 +--
 pkg/storegateway/bucket_chunk_reader.go       |  3 +--
 pkg/storegateway/bucket_index_postings.go     |  3 +--
 pkg/storegateway/bucket_index_reader.go       |  3 +--
 .../indexheader/encoding/encoding.go          |  4 +--
 pkg/storegateway/series_chunks.go             |  3 +--
 pkg/util/math/math.go                         | 26 -------------------
 tools/wal-reader/main.go                      | 14 +++++-----
 20 files changed, 34 insertions(+), 78 deletions(-)
 delete mode 100644 pkg/util/math/math.go

diff --git a/pkg/continuoustest/client.go b/pkg/continuoustest/client.go
index 13f694e078b..9592ff67be3 100644
--- a/pkg/continuoustest/client.go
+++ b/pkg/continuoustest/client.go
@@ -20,7 +20,6 @@ import (
     querierapi "github.com/grafana/mimir/pkg/querier/api"
     "github.com/grafana/mimir/pkg/util/chunkinfologger"
     "github.com/grafana/mimir/pkg/util/instrumentation"
-    util_math "github.com/grafana/mimir/pkg/util/math"
 )

 const (
@@ -216,7 +215,7 @@ func (c *Client) WriteSeries(ctx context.Context, series []prompb.TimeSeries) (i

     // Honor the batch size.
     for len(series) > 0 {
-        end := util_math.Min(len(series), c.cfg.WriteBatchSize)
+        end := min(len(series), c.cfg.WriteBatchSize)
         batch := series[0:end]
         series = series[end:]

diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index bd4c3b3ffe8..7bf589f7bc4 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -1133,12 +1133,12 @@ func (d *Distributor) prePushValidationMiddleware(next PushFunc) PushFunc {
         earliestSampleTimestampMs, latestSampleTimestampMs := int64(math.MaxInt64), int64(0)
         for _, ts := range req.Timeseries {
             for _, s := range ts.Samples {
-                earliestSampleTimestampMs = util_math.Min(earliestSampleTimestampMs, s.TimestampMs)
-                latestSampleTimestampMs = util_math.Max(latestSampleTimestampMs, s.TimestampMs)
+                earliestSampleTimestampMs = min(earliestSampleTimestampMs, s.TimestampMs)
+                latestSampleTimestampMs = max(latestSampleTimestampMs, s.TimestampMs)
             }
             for _, h := range ts.Histograms {
-                earliestSampleTimestampMs = util_math.Min(earliestSampleTimestampMs, h.Timestamp)
-                latestSampleTimestampMs = util_math.Max(latestSampleTimestampMs, h.Timestamp)
+                earliestSampleTimestampMs = min(earliestSampleTimestampMs, h.Timestamp)
+                latestSampleTimestampMs = max(latestSampleTimestampMs, h.Timestamp)
             }
         }
         // Update this metric even in case of errors.
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 561021499d1..76a27fff797 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -64,7 +64,6 @@ import (
     "github.com/grafana/mimir/pkg/util/extract"
     "github.com/grafana/mimir/pkg/util/globalerror"
     "github.com/grafana/mimir/pkg/util/limiter"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     util_test "github.com/grafana/mimir/pkg/util/test"
     "github.com/grafana/mimir/pkg/util/testkafka"
     "github.com/grafana/mimir/pkg/util/validation"
@@ -960,7 +959,7 @@ func TestDistributor_PushQuery(t *testing.T) {

             var expectedIngesters int
             if shuffleShardSize > 0 {
-                expectedIngesters = util_math.Min(shuffleShardSize, numIngesters)
+                expectedIngesters = min(shuffleShardSize, numIngesters)
             } else {
                 expectedIngesters = numIngesters
             }

diff --git a/pkg/frontend/querymiddleware/querysharding.go b/pkg/frontend/querymiddleware/querysharding.go
index 1fe6a186de1..7fede857cb9 100644
--- a/pkg/frontend/querymiddleware/querysharding.go
+++ b/pkg/frontend/querymiddleware/querysharding.go
@@ -30,7 +30,6 @@ import (
     "github.com/grafana/mimir/pkg/querier/stats"
     "github.com/grafana/mimir/pkg/storage/lazyquery"
     "github.com/grafana/mimir/pkg/util"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     "github.com/grafana/mimir/pkg/util/spanlogger"
     "github.com/grafana/mimir/pkg/util/validation"
 )
@@ -345,7 +344,7 @@ func (s *querySharding) getShardsForQuery(ctx context.Context, tenantIDs []strin
         prevTotalShards := totalShards
         // If an estimate for query cardinality is available, use it to limit the number
         // of shards based on linear interpolation.
-        totalShards = util_math.Min(totalShards, int(seriesCount.EstimatedSeriesCount/s.maxSeriesPerShard)+1)
+        totalShards = min(totalShards, int(seriesCount.EstimatedSeriesCount/s.maxSeriesPerShard)+1)

         if prevTotalShards != totalShards {
             spanLog.DebugLog(
@@ -380,7 +379,7 @@ func (s *querySharding) getShardsForQuery(ctx context.Context, tenantIDs []strin
         }

         prevTotalShards := totalShards
-        totalShards = util_math.Max(1, util_math.Min(totalShards, (maxShardedQueries/int(hints.TotalQueries))/numShardableLegs))
+        totalShards = max(1, min(totalShards, (maxShardedQueries/int(hints.TotalQueries))/numShardableLegs))

         if prevTotalShards != totalShards {
             spanLog.DebugLog(
@@ -498,7 +497,7 @@ func longestRegexpMatcherBytes(expr parser.Expr) int {
             continue
         }

-        longest = util_math.Max(longest, len(matcher.Value))
     }
 }

diff --git a/pkg/frontend/querymiddleware/results_cache.go b/pkg/frontend/querymiddleware/results_cache.go
index 9a6c65612b1..b13699c30c5 100644
--- a/pkg/frontend/querymiddleware/results_cache.go
+++ b/pkg/frontend/querymiddleware/results_cache.go
@@ -34,7 +34,6 @@ import (

     "github.com/grafana/mimir/pkg/mimirpb"
     "github.com/grafana/mimir/pkg/util"
-    "github.com/grafana/mimir/pkg/util/math"
 )

 const (
@@ -394,11 +393,11 @@ func mergeCacheExtentsForRequest(ctx context.Context, r MetricsQueryRequest, mer

         if accumulator.QueryTimestampMs > 0 && extents[i].QueryTimestampMs > 0 {
             // Keep older (minimum) timestamp.
-            accumulator.QueryTimestampMs = math.Min(accumulator.QueryTimestampMs, extents[i].QueryTimestampMs)
+            accumulator.QueryTimestampMs = min(accumulator.QueryTimestampMs, extents[i].QueryTimestampMs)
         } else {
             // Some old extents may have zero timestamps. In that case we keep the non-zero one.
             // (Hopefully one of them is not zero, since we're only merging if there are some new extents.)
-            accumulator.QueryTimestampMs = math.Max(accumulator.QueryTimestampMs, extents[i].QueryTimestampMs)
+            accumulator.QueryTimestampMs = max(accumulator.QueryTimestampMs, extents[i].QueryTimestampMs)
         }
     }

diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 66f35dd6365..08edb6ab54c 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -2964,7 +2964,7 @@ func (i *Ingester) minTsdbHeadTimestamp() float64 {

     minTime := int64(math.MaxInt64)
     for _, db := range i.tsdbs {
-        minTime = util_math.Min(minTime, db.db.Head().MinTime())
+        minTime = min(minTime, db.db.Head().MinTime())
     }

     if minTime == math.MaxInt64 {
@@ -2980,7 +2980,7 @@ func (i *Ingester) maxTsdbHeadTimestamp() float64 {

     maxTime := int64(math.MinInt64)
     for _, db := range i.tsdbs {
-        maxTime = util_math.Max(maxTime, db.db.Head().MaxTime())
+        maxTime = max(maxTime, db.db.Head().MaxTime())
     }

     if maxTime == math.MinInt64 {
@@ -3248,7 +3248,7 @@ func (i *Ingester) compactBlocksToReduceInMemorySeries(ctx context.Context, now
         // Estimate the number of series that would be dropped from the TSDB Head if we would
         // compact the head up until "now - active series idle timeout".
         totalActiveSeries, _, _ := db.activeSeries.Active()
-        estimatedSeriesReduction := util_math.Max(0, int64(userMemorySeries)-int64(totalActiveSeries))
+        estimatedSeriesReduction := max(0, int64(userMemorySeries)-int64(totalActiveSeries))
         estimations = append(estimations, seriesReductionEstimation{
             userID:         userID,
             estimatedCount: estimatedSeriesReduction,

diff --git a/pkg/ingester/ingester_early_compaction_test.go b/pkg/ingester/ingester_early_compaction_test.go
index 7e34dc399b3..531d8a673f0 100644
--- a/pkg/ingester/ingester_early_compaction_test.go
+++ b/pkg/ingester/ingester_early_compaction_test.go
@@ -28,7 +28,6 @@ import (
     "golang.org/x/exp/slices"

     "github.com/grafana/mimir/pkg/ingester/client"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     util_test "github.com/grafana/mimir/pkg/util/test"
 )

@@ -602,7 +601,7 @@ func TestIngester_compactBlocksToReduceInMemorySeries_Concurrency(t *testing.T)
     // Find the lowest sample written. We compact up until that timestamp.
     writerTimesMx.Lock()
     for _, ts := range writerTimes {
-        lowestWriterTimeMilli = util_math.Min(lowestWriterTimeMilli, ts)
+        lowestWriterTimeMilli = min(lowestWriterTimeMilli, ts)
     }
     writerTimesMx.Unlock()

diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index e92b19b7f75..6d03bc83535 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -70,7 +70,6 @@ import (
     "github.com/grafana/mimir/pkg/usagestats"
     "github.com/grafana/mimir/pkg/util"
     "github.com/grafana/mimir/pkg/util/globalerror"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     util_test "github.com/grafana/mimir/pkg/util/test"
     "github.com/grafana/mimir/pkg/util/validation"
 )
@@ -4978,7 +4977,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples

     for ts := startTimestamp; ts < startTimestamp+(step*int64(numSamplesPerSeries)); ts += step {
         for o := 0; o < numSeries; o += maxBatchSize {
-            batchSize := util_math.Min(maxBatchSize, numSeries-o)
+            batchSize := min(maxBatchSize, numSeries-o)

             // Generate metrics and samples (1 for each series).
             metrics := make([][]mimirpb.LabelAdapter, 0, batchSize)

diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index 74f81c545ac..f743cb82a7e 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -11,7 +11,6 @@ import (
     "github.com/grafana/dskit/ring"

     "github.com/grafana/mimir/pkg/util"
-    util_math "github.com/grafana/mimir/pkg/util/math"
 )

 // limiterTenantLimits provides access to limits used by Limiter.
@@ -168,7 +167,7 @@ func (is *ingesterRingLimiterStrategy) convertGlobalToLocalLimit(userID string,
     // expected number of ingesters per sharded zone, then we should honor the latter because series/metadata
     // cannot be written to more ingesters than that.
     if userShardSize > 0 {
-        ingestersInZoneCount = util_math.Min(ingestersInZoneCount, util.ShuffleShardExpectedInstancesPerZone(userShardSize, zonesCount))
+        ingestersInZoneCount = min(ingestersInZoneCount, util.ShuffleShardExpectedInstancesPerZone(userShardSize, zonesCount))
     }

     // This may happen, for example when the total number of ingesters is asynchronously updated, or
@@ -190,7 +189,7 @@ func (is *ingesterRingLimiterStrategy) getShardSize(userID string) int {

 func (is *ingesterRingLimiterStrategy) getZonesCount() int {
     if is.zoneAwarenessEnabled {
-        return util_math.Max(is.ring.ZonesCount(), 1)
+        return max(is.ring.ZonesCount(), 1)
     }

     return 1
 }

diff --git a/pkg/ingester/user_tsdb.go b/pkg/ingester/user_tsdb.go
index 95bfe9840e2..5a3ed82c28c 100644
--- a/pkg/ingester/user_tsdb.go
+++ b/pkg/ingester/user_tsdb.go
@@ -261,7 +261,7 @@ func nextForcedHeadCompactionRange(blockDuration, headMinTime, headMaxTime, forc
     // By default we try to compact the whole head, honoring the forcedMaxTime.
     minTime = headMinTime
-    maxTime = util_math.Min(headMaxTime, forcedMaxTime)
+    maxTime = min(headMaxTime, forcedMaxTime)

     // Due to the forcedMaxTime, the range may be empty. In that case we just skip it.
     if maxTime < minTime {

diff --git a/pkg/querier/cardinality_analysis_handler.go b/pkg/querier/cardinality_analysis_handler.go
index 9a6d57081bd..90ecec58005 100644
--- a/pkg/querier/cardinality_analysis_handler.go
+++ b/pkg/querier/cardinality_analysis_handler.go
@@ -19,7 +19,6 @@ import (
     "github.com/grafana/mimir/pkg/querier/api"
     "github.com/grafana/mimir/pkg/querier/worker"
     "github.com/grafana/mimir/pkg/util"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     "github.com/grafana/mimir/pkg/util/validation"
 )

@@ -196,7 +195,7 @@ func toLabelNamesCardinalityResponse(response *ingester_client.LabelNamesAndValu
     labelsWithValues := response.Items
     sortByValuesCountAndName(labelsWithValues)
     valuesCountTotal := getValuesCountTotal(labelsWithValues)
-    items := make([]*api.LabelNamesCardinalityItem, util_math.Min(len(labelsWithValues), limit))
+    items := make([]*api.LabelNamesCardinalityItem, min(len(labelsWithValues), limit))
     for i := 0; i < len(items); i++ {
         items[i] = &api.LabelNamesCardinalityItem{LabelName: labelsWithValues[i].LabelName, LabelValuesCount: len(labelsWithValues[i].Values)}
     }

diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index 92640fd2b83..8c44c89cf65 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -35,7 +35,6 @@ import (
     "github.com/grafana/mimir/pkg/util"
     "github.com/grafana/mimir/pkg/util/activitytracker"
     "github.com/grafana/mimir/pkg/util/limiter"
-    "github.com/grafana/mimir/pkg/util/math"
     "github.com/grafana/mimir/pkg/util/spanlogger"
     "github.com/grafana/mimir/pkg/util/validation"
 )
@@ -617,7 +616,7 @@ func clampMaxTime(spanLog *spanlogger.SpanLogger, maxT int64, refT int64, limitD
         // limits equal to 0 are considered to not be enabled
         return maxT
     }
-    clampedT := math.Min(maxT, refT+limitDelta.Milliseconds())
+    clampedT := min(maxT, refT+limitDelta.Milliseconds())

     if clampedT != maxT {
         logClampEvent(spanLog, maxT, clampedT, "max", limitName)
@@ -640,7 +639,7 @@ func clampMinTime(spanLog *spanlogger.SpanLogger, minT int64, refT int64, limitD
         // limits equal to 0 are considered to not be enabled
         return minT
     }
-    clampedT := math.Max(minT, refT+limitDelta.Milliseconds())
+    clampedT := max(minT, refT+limitDelta.Milliseconds())

     if clampedT != minT {
         logClampEvent(spanLog, minT, clampedT, "min", limitName)

diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go
index dcf22a10e9f..2b9459a116e 100644
--- a/pkg/querier/worker/worker.go
+++ b/pkg/querier/worker/worker.go
@@ -25,7 +25,6 @@ import (
     "github.com/grafana/mimir/pkg/scheduler/schedulerdiscovery"
     "github.com/grafana/mimir/pkg/util/grpcencoding/s2"
-    "github.com/grafana/mimir/pkg/util/math"
 )

 type Config struct {
@@ -353,7 +352,7 @@ func (w *querierWorker) getDesiredConcurrency() map[string]int {
     )

     // new adjusted minimum to ensure that each in-use instance has at least MinConcurrencyPerRequestQueue connections.
-    maxConcurrentWithMinPerInstance := math.Max(
+    maxConcurrentWithMinPerInstance := max(
         w.maxConcurrentRequests, MinConcurrencyPerRequestQueue*numInUse,
     )
     if maxConcurrentWithMinPerInstance > w.maxConcurrentRequests {

diff --git a/pkg/storegateway/bucket_chunk_reader.go b/pkg/storegateway/bucket_chunk_reader.go
index 5e78cb94f57..b483ca19d7b 100644
--- a/pkg/storegateway/bucket_chunk_reader.go
+++ b/pkg/storegateway/bucket_chunk_reader.go
@@ -25,7 +25,6 @@ import (
     "github.com/grafana/mimir/pkg/storage/tsdb"
     "github.com/grafana/mimir/pkg/storegateway/storepb"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     "github.com/grafana/mimir/pkg/util/pool"
 )

@@ -68,7 +67,7 @@ func (r *bucketChunkReader) addLoad(id chunks.ChunkRef, seriesEntry, chunkEntry
     }
     r.toLoad[seq] = append(r.toLoad[seq], loadIdx{
         offset:      off,
-        length:      util_math.Max(varint.MaxLen32, length), // If the length is 0, we need to at least fetch the length of the chunk.
+        length:      max(varint.MaxLen32, length), // If the length is 0, we need to at least fetch the length of the chunk.
         seriesEntry: seriesEntry,
         chunkEntry:  chunkEntry,
     })

diff --git a/pkg/storegateway/bucket_index_postings.go b/pkg/storegateway/bucket_index_postings.go
index 70d25628420..6111374c6cc 100644
--- a/pkg/storegateway/bucket_index_postings.go
+++ b/pkg/storegateway/bucket_index_postings.go
@@ -21,7 +21,6 @@ import (
     "github.com/grafana/mimir/pkg/storage/tsdb"
     "github.com/grafana/mimir/pkg/storegateway/indexheader"
     streamindex "github.com/grafana/mimir/pkg/storegateway/indexheader/index"
-    util_math "github.com/grafana/mimir/pkg/util/math"
 )

 // rawPostingGroup keeps posting keys for single matcher. It is raw because there is no guarantee
@@ -458,7 +457,7 @@ func (w labelValuesPostingsStrategy) selectPostings(matchersGroups []postingGrou
     completeMatchersPlusSeriesSize := completeMatchersSize + maxPossibleSeriesSize
     partialMatchersPlusSeriesSize := postingGroupsTotalSize(partialMatchersGroups) + maxPossibleSeriesSize

-    if util_math.Min(completeMatchersPlusSeriesSize, completeMatchersPlusLabelValuesSize) < partialMatchersPlusSeriesSize {
+    if min(completeMatchersPlusSeriesSize, completeMatchersPlusLabelValuesSize) < partialMatchersPlusSeriesSize {
         return matchersGroups, nil
     }
     return partialMatchersGroups, omittedMatchersGroups

diff --git a/pkg/storegateway/bucket_index_reader.go b/pkg/storegateway/bucket_index_reader.go
index 413527ff9a2..f97d1507235 100644
--- a/pkg/storegateway/bucket_index_reader.go
+++ b/pkg/storegateway/bucket_index_reader.go
@@ -36,7 +36,6 @@ import (
     "github.com/grafana/mimir/pkg/storegateway/indexheader"
     streamindex "github.com/grafana/mimir/pkg/storegateway/indexheader/index"
     "github.com/grafana/mimir/pkg/util"
-    util_math "github.com/grafana/mimir/pkg/util/math"
     "github.com/grafana/mimir/pkg/util/pool"
     "github.com/grafana/mimir/pkg/util/spanlogger"
 )
@@ -455,7 +454,7 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab
                 "labels_key", cachedLabelsKey,
                 "block", r.block.meta.ULID,
                 "bytes_len", len(b),
-                "bytes_head_hex", hex.EncodeToString(b[:util_math.Min(8, len(b))]),
+                "bytes_head_hex", hex.EncodeToString(b[:min(8, len(b))]),
             )
         }

diff --git a/pkg/storegateway/indexheader/encoding/encoding.go b/pkg/storegateway/indexheader/encoding/encoding.go
index 5f560219f44..9a6ebc92441 100644
--- a/pkg/storegateway/indexheader/encoding/encoding.go
+++ b/pkg/storegateway/indexheader/encoding/encoding.go
@@ -12,8 +12,6 @@ import (
     "github.com/dennwc/varint"
     "github.com/pkg/errors"
-
"github.com/grafana/mimir/pkg/util/math" ) var ( @@ -49,7 +47,7 @@ func (d *Decbuf) CheckCrc32(castagnoliTable *crc32.Table) { rawBuf := make([]byte, maxChunkSize) for bytesToRead > 0 { - chunkSize := math.Min(bytesToRead, maxChunkSize) + chunkSize := min(bytesToRead, maxChunkSize) chunkBuf := rawBuf[0:chunkSize] err := d.r.readInto(chunkBuf) diff --git a/pkg/storegateway/series_chunks.go b/pkg/storegateway/series_chunks.go index f6a80aa73a3..f5d3648deed 100644 --- a/pkg/storegateway/series_chunks.go +++ b/pkg/storegateway/series_chunks.go @@ -14,7 +14,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/grafana/mimir/pkg/storegateway/storepb" - util_math "github.com/grafana/mimir/pkg/util/math" "github.com/grafana/mimir/pkg/util/pool" "github.com/grafana/mimir/pkg/util/spanlogger" ) @@ -378,7 +377,7 @@ func (c *loadingSeriesChunksSetIterator) Next() (retHasNext bool) { // Pre-allocate the series slice using the expected batchSize even if nextUnloaded has less elements, // so that there's a higher chance the slice will be reused once released. - nextSet := newSeriesChunksSet(util_math.Max(c.fromBatchSize, nextUnloaded.len()), true) + nextSet := newSeriesChunksSet(max(c.fromBatchSize, nextUnloaded.len()), true) // Release the set if an error occurred. defer func() { diff --git a/pkg/util/math/math.go b/pkg/util/math/math.go deleted file mode 100644 index 834416de9d2..00000000000 --- a/pkg/util/math/math.go +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/util/math/math.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Cortex Authors. - -package math - -import ( - "golang.org/x/exp/constraints" -) - -// Max returns the maximum of two ordered arguments. -func Max[T constraints.Ordered](a, b T) T { - if a > b { - return a - } - return b -} - -// Min returns the minimum of two ordered arguments. 
-func Min[T constraints.Ordered](a, b T) T {
-    if a < b {
-        return a
-    }
-    return b
-}

diff --git a/tools/wal-reader/main.go b/tools/wal-reader/main.go
index 8399e29f70e..16c95ab2948 100644
--- a/tools/wal-reader/main.go
+++ b/tools/wal-reader/main.go
@@ -15,8 +15,6 @@ import (
     "github.com/prometheus/prometheus/tsdb/chunks"
     "github.com/prometheus/prometheus/tsdb/record"
     "github.com/prometheus/prometheus/tsdb/wlog"
-
-    util_math "github.com/grafana/mimir/pkg/util/math"
 )

 func main() {
@@ -172,8 +170,8 @@ func printWalEntries(r *wlog.Reader, seriesMap map[chunks.HeadSeriesRef]string,
                     log.Println("seg:", seg, "off:", off, "samples record:", s.Ref, s.T, formatTimestamp(s.T), s.V, si)
                 }

-                *minSampleTime = util_math.Min(s.T, *minSampleTime)
-                *maxSampleTime = util_math.Max(s.T, *maxSampleTime)
+                *minSampleTime = min(s.T, *minSampleTime)
+                *maxSampleTime = max(s.T, *maxSampleTime)
             }

         case record.Tombstones:
@@ -213,8 +211,8 @@ func printWalEntries(r *wlog.Reader, seriesMap map[chunks.HeadSeriesRef]string,
                     log.Println("seg:", seg, "off:", off, "histograms record:", s.Ref, s.T, formatTimestamp(s.T), si)
                 }

-                *minSampleTime = util_math.Min(s.T, *minSampleTime)
-                *maxSampleTime = util_math.Max(s.T, *maxSampleTime)
+                *minSampleTime = min(s.T, *minSampleTime)
+                *maxSampleTime = max(s.T, *maxSampleTime)
             }

         case record.FloatHistogramSamples:
@@ -229,8 +227,8 @@ func printWalEntries(r *wlog.Reader, seriesMap map[chunks.HeadSeriesRef]string,
                     log.Println("seg:", seg, "off:", off, "float histograms record:", s.Ref, s.T, formatTimestamp(s.T), si)
                 }

-                *minSampleTime = util_math.Min(s.T, *minSampleTime)
-                *maxSampleTime = util_math.Max(s.T, *maxSampleTime)
+                *minSampleTime = min(s.T, *minSampleTime)
+                *maxSampleTime = max(s.T, *maxSampleTime)
             }

         case record.Metadata:
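For reference, a minimal standalone sketch (not part of the patch; the values and
variable names are invented for illustration) of how the Go 1.21 built-in `min`
and `max` behave at call sites like the ones rewritten above:

package main

import "fmt"

func main() {
    // The built-ins accept one or more arguments of any ordered type, so
    // call sites like min(len(series), batchSize) need no generic helper.
    series := make([]int, 1500)
    batchSize := 1000
    fmt.Println(min(len(series), batchSize)) // 1000

    // Untyped constants adapt to the typed operand, mirroring the ingester
    // change max(0, int64(userMemorySeries)-int64(totalActiveSeries)).
    var memorySeries, activeSeries int64 = 10, 25
    fmt.Println(max(0, memorySeries-activeSeries)) // 0, never negative

    // Calls also nest, as in the query-sharding change.
    fmt.Println(max(1, min(64, 10))) // 10
}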