diff --git a/.github/workflows/xk6-tests/xk6-js-test/jstest.go b/.github/workflows/xk6-tests/xk6-js-test/jstest.go index 2fd327e0d2d..269dc83137a 100644 --- a/.github/workflows/xk6-tests/xk6-js-test/jstest.go +++ b/.github/workflows/xk6-tests/xk6-js-test/jstest.go @@ -18,6 +18,8 @@ type ( // JSTest is meant to test xk6 and the JS extension sub-system of k6. JSTest struct { vu modules.VU + + foos *stats.Metric } ) @@ -35,7 +37,10 @@ func New() *RootModule { // NewModuleInstance implements the modules.Module interface and returns // a new instance for each VU. func (*RootModule) NewModuleInstance(vu modules.VU) modules.Instance { - return &JSTest{vu: vu} + return &JSTest{ + vu: vu, + foos: vu.InitEnv().Registry.MustNewMetric("foos", stats.Counter), + } } // Exports implements the modules.Instance interface and returns the exports @@ -45,7 +50,7 @@ func (j *JSTest) Exports() modules.Exports { } // Foo emits a foo metric -func (j JSTest) Foo(arg float64) (bool, error) { +func (j *JSTest) Foo(arg float64) (bool, error) { state := j.vu.State() if state == nil { return false, fmt.Errorf("the VU State is not avaialble in the init context") @@ -53,12 +58,11 @@ func (j JSTest) Foo(arg float64) (bool, error) { ctx := j.vu.Context() - allTheFoos := stats.New("foos", stats.Counter) tags := state.CloneTags() tags["foo"] = "bar" stats.PushIfNotDone(ctx, state.Samples, stats.Sample{ Time: time.Now(), - Metric: allTheFoos, Tags: stats.IntoSampleTags(&tags), + Metric: j.foos, Tags: stats.IntoSampleTags(&tags), Value: arg, }) diff --git a/api/server_test.go b/api/server_test.go index 585ce3c6bda..2e7df53baee 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -35,9 +35,9 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" + "go.k6.io/k6/metrics" ) func testHTTPHandler(rw http.ResponseWriter, r *http.Request) { @@ -80,11 +80,11 @@ func TestLogger(t *testing.T) { func TestWithEngine(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logger) - require.NoError(t, err) registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) rw := httptest.NewRecorder() diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index 2fa8a0937b6..f5765bbaa5c 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -33,9 +33,9 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" + "go.k6.io/k6/metrics" ) func TestGetGroups(t *testing.T) { @@ -49,11 +49,11 @@ func TestGetGroups(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, logger) - require.NoError(t, err) registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, 
nil, logger, builtinMetrics) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, builtinMetrics, logger) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) t.Run("list", func(t *testing.T) { diff --git a/api/v1/metric_routes.go b/api/v1/metric_routes.go index 6ffdd929d02..2855e4bcdeb 100644 --- a/api/v1/metric_routes.go +++ b/api/v1/metric_routes.go @@ -36,9 +36,9 @@ func handleGetMetrics(rw http.ResponseWriter, r *http.Request) { t = engine.ExecutionScheduler.GetState().GetCurrentTestRunDuration() } - engine.MetricsLock.Lock() - metrics := newMetricsJSONAPI(engine.Metrics, t) - engine.MetricsLock.Unlock() + engine.MetricsEngine.MetricsLock.Lock() + metrics := newMetricsJSONAPI(engine.MetricsEngine.ObservedMetrics, t) + engine.MetricsEngine.MetricsLock.Unlock() data, err := json.Marshal(metrics) if err != nil { @@ -56,13 +56,17 @@ func handleGetMetric(rw http.ResponseWriter, r *http.Request, id string) { t = engine.ExecutionScheduler.GetState().GetCurrentTestRunDuration() } - metric, ok := engine.Metrics[id] + engine.MetricsEngine.MetricsLock.Lock() + metric, ok := engine.MetricsEngine.ObservedMetrics[id] if !ok { + engine.MetricsEngine.MetricsLock.Unlock() apiError(rw, "Not Found", "No metric with that ID was found", http.StatusNotFound) return } + wrappedMetric := newMetricEnvelope(metric, t) + engine.MetricsEngine.MetricsLock.Unlock() - data, err := json.Marshal(newMetricEnvelope(metric, t)) + data, err := json.Marshal(wrappedMetric) if err != nil { apiError(rw, "Encoding error", err.Error(), http.StatusInternalServerError) return diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index a32e307f9a2..80d37fe4ca6 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -34,9 +34,9 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) @@ -45,17 +45,17 @@ func TestGetMetrics(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logger) - require.NoError(t, err) registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) - engine.Metrics = map[string]*stats.Metric{ + engine.MetricsEngine.ObservedMetrics = map[string]*stats.Metric{ "my_metric": stats.New("my_metric", stats.Trend, stats.Time), } - engine.Metrics["my_metric"].Tainted = null.BoolFrom(true) + engine.MetricsEngine.ObservedMetrics["my_metric"].Tainted = null.BoolFrom(true) rw := httptest.NewRecorder() NewHandler().ServeHTTP(rw, newRequestWithEngine(engine, "GET", "/v1/metrics", nil)) @@ -105,17 +105,17 @@ func TestGetMetric(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logger) - require.NoError(t, err) registry := metrics.NewRegistry() builtinMetrics := 
metrics.RegisterBuiltinMetrics(registry) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) - engine.Metrics = map[string]*stats.Metric{ + engine.MetricsEngine.ObservedMetrics = map[string]*stats.Metric{ "my_metric": stats.New("my_metric", stats.Trend, stats.Time), } - engine.Metrics["my_metric"].Tainted = null.BoolFrom(true) + engine.MetricsEngine.ObservedMetrics["my_metric"].Tainted = null.BoolFrom(true) t.Run("nonexistent", func(t *testing.T) { t.Parallel() diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index 0f7f8c3204d..a274b4c91bb 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -24,6 +24,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "net/http" "net/http/httptest" "net/url" @@ -39,10 +40,10 @@ import ( "go.k6.io/k6/core/local" "go.k6.io/k6/js" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" ) func TestSetupData(t *testing.T) { @@ -133,15 +134,17 @@ func TestSetupData(t *testing.T) { }, }, } - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - for _, testCase := range testCases { - testCase := testCase + + runTestCase := func(t *testing.T, tcid int) { + testCase := testCases[tcid] t.Run(testCase.name, func(t *testing.T) { t.Parallel() + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + runner, err := js.New( logger, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, @@ -159,19 +162,22 @@ func TestSetupData(t *testing.T) { SetupTimeout: types.NullDurationFrom(5 * time.Second), TeardownTimeout: types.NullDurationFrom(5 * time.Second), }) - execScheduler, err := local.NewExecutionScheduler(runner, logger) + execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, runner.GetOptions(), lib.RuntimeOptions{}, nil, logger, builtinMetrics) + engine, err := core.NewEngine(execScheduler, runner.GetOptions(), lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) + require.NoError(t, engine.OutputManager.StartOutputs()) + defer engine.OutputManager.StopOutputs() + globalCtx, globalCancel := context.WithCancel(context.Background()) runCtx, runCancel := context.WithCancel(globalCtx) run, wait, err := engine.Init(globalCtx, runCtx) + require.NoError(t, err) + defer wait() defer globalCancel() - require.NoError(t, err) - errC := make(chan error) go func() { errC <- run() }() @@ -211,4 +217,12 @@ func TestSetupData(t *testing.T) { } }) } + + for id := range testCases { + id := id + t.Run(fmt.Sprintf("testcase_%d", id), func(t *testing.T) { + t.Parallel() + runTestCase(t, id) + }) + } } diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index d59f19802a5..70b530646c8 100644 --- a/api/v1/status_routes_test.go +++ b/api/v1/status_routes_test.go @@ -37,9 +37,9 @@ import ( "go.k6.io/k6/core" 
"go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" + "go.k6.io/k6/metrics" ) func TestGetStatus(t *testing.T) { @@ -47,11 +47,11 @@ func TestGetStatus(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logger) - require.NoError(t, err) registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) rw := httptest.NewRecorder() @@ -124,34 +124,44 @@ func TestPatchStatus(t *testing.T) { Payload: []byte(`{"data":{"type":"status","id":"default","attributes":{"status":0,"paused":null,"vus":10,"vus-max":10,"stopped":false,"running":false,"tainted":false}}}`), }, } - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - scenarios := lib.ScenarioConfigs{} - err := json.Unmarshal([]byte(` - {"external": {"executor": "externally-controlled", - "vus": 0, "maxVUs": 10, "duration": "1s"}}`), &scenarios) - require.NoError(t, err) - options := lib.Options{Scenarios: scenarios} - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for name, testCase := range testData { t.Run(name, func(t *testing.T) { t.Parallel() + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, logger) + scenarios := lib.ScenarioConfigs{} + err := json.Unmarshal([]byte(` + {"external": {"executor": "externally-controlled", + "vus": 0, "maxVUs": 10, "duration": "0"}}`), &scenarios) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, options, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + options := lib.Options{Scenarios: scenarios} + + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, builtinMetrics, logger) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - run, _, err := engine.Init(ctx, ctx) + engine, err := core.NewEngine(execScheduler, options, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) - go func() { _ = run() }() + require.NoError(t, engine.OutputManager.StartOutputs()) + defer engine.OutputManager.StopOutputs() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + run, wait, err := engine.Init(ctx, ctx) + require.NoError(t, err) + + defer func() { + cancel() + wait() + }() + + go func() { + assert.NoError(t, run()) + }() // wait for the executor to initialize to avoid a potential data race below - time.Sleep(100 * time.Millisecond) + time.Sleep(200 * time.Millisecond) rw := httptest.NewRecorder() NewHandler().ServeHTTP(rw, newRequestWithEngine(engine, "PATCH", "/v1/status", bytes.NewReader(testCase.Payload))) diff --git a/cmd/cloud.go b/cmd/cloud.go index 59e9d0a85a6..ad0f4be69cf 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -200,10 +200,10 @@ func (c *cmdCloud) run(cmd *cobra.Command, args 
[]string) error { globalCancel() }() } - hardStop := func(sig os.Signal) { + onHardStop := func(sig os.Signal) { logger.WithField("sig", sig).Error("Aborting k6 in response to signal, we won't wait for the test to end.") } - stopSignalHandling := handleTestAbortSignals(c.gs, gracefulStop, hardStop) + stopSignalHandling := handleTestAbortSignals(c.gs, gracefulStop, onHardStop) defer stopSignalHandling() et, err := lib.NewExecutionTuple(test.derivedConfig.ExecutionSegment, test.derivedConfig.ExecutionSegmentSequence) diff --git a/cmd/common.go b/cmd/common.go index 5d39fd950bd..47e51555270 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -92,7 +92,7 @@ func printToStdout(gs *globalState, s string) { } // Trap Interrupts, SIGINTs and SIGTERMs and call the given. -func handleTestAbortSignals(gs *globalState, firstHandler, secondHandler func(os.Signal)) (stop func()) { +func handleTestAbortSignals(gs *globalState, gracefulStopHandler, onHardStop func(os.Signal)) (stop func()) { sigC := make(chan os.Signal, 2) done := make(chan struct{}) gs.signalNotify(sigC, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) @@ -100,15 +100,15 @@ func handleTestAbortSignals(gs *globalState, firstHandler, secondHandler func(os go func() { select { case sig := <-sigC: - firstHandler(sig) + gracefulStopHandler(sig) case <-done: return } select { case sig := <-sigC: - if secondHandler != nil { - secondHandler(sig) + if onHardStop != nil { + onHardStop(sig) } // If we get a second signal, we immediately exit, so something like // https://github.com/k6io/k6/issues/971 never happens again diff --git a/cmd/integration_test.go b/cmd/integration_test.go index ae60195d6ce..de919c2db20 100644 --- a/cmd/integration_test.go +++ b/cmd/integration_test.go @@ -2,7 +2,9 @@ package cmd import ( "bytes" + "encoding/json" "path/filepath" + "strings" "testing" "github.com/sirupsen/logrus" @@ -132,6 +134,101 @@ func TestWrongEnvVarIterations(t *testing.T) { assert.Empty(t, ts.loggerHook.Drain()) } +func TestMetricsAndThresholds(t *testing.T) { + t.Parallel() + script := ` + import { Counter } from 'k6/metrics'; + + var setupCounter = new Counter('setup_counter'); + var teardownCounter = new Counter('teardown_counter'); + var defaultCounter = new Counter('default_counter'); + let unusedCounter = new Counter('unused_counter'); + + export const options = { + scenarios: { + sc1: { + executor: 'per-vu-iterations', + vus: 1, + iterations: 1, + }, + sc2: { + executor: 'shared-iterations', + vus: 1, + iterations: 1, + }, + }, + thresholds: { + 'setup_counter': ['count == 1'], + 'teardown_counter': ['count == 1'], + 'default_counter': ['count == 2'], + 'default_counter{scenario:sc1}': ['count == 1'], + 'default_counter{scenario:sc2}': ['count == 1'], + 'iterations': ['count == 2'], + 'iterations{scenario:sc1}': ['count == 1'], + 'iterations{scenario:sc2}': ['count == 1'], + 'default_counter{nonexistent:tag}': ['count == 0'], + 'unused_counter': ['count == 0'], + 'http_req_duration{status:200}': [' max == 0'], // no HTTP requests + }, + }; + + export function setup() { + console.log('setup() start'); + setupCounter.add(1); + console.log('setup() end'); + return { foo: 'bar' } + } + + export default function (data) { + console.log('default(' + JSON.stringify(data) + ')'); + defaultCounter.add(1); + } + + export function teardown(data) { + console.log('teardown(' + JSON.stringify(data) + ')'); + teardownCounter.add(1); + } + + export function handleSummary(data) { + console.log('handleSummary()'); + return { stdout: JSON.stringify(data, null, 4) } 
+ } + ` + ts := newGlobalTestState(t) + require.NoError(t, afero.WriteFile(ts.fs, filepath.Join(ts.cwd, "test.js"), []byte(script), 0o644)) + ts.args = []string{"k6", "run", "--quiet", "--log-format=raw", "test.js"} + + newRootCommand(ts.globalState).execute() + + expLogLines := []string{ + `setup() start`, `setup() end`, `default({"foo":"bar"})`, + `default({"foo":"bar"})`, `teardown({"foo":"bar"})`, `handleSummary()`, + } + + logHookEntries := ts.loggerHook.Drain() + require.Len(t, logHookEntries, len(expLogLines)) + for i, expLogLine := range expLogLines { + assert.Equal(t, expLogLine, logHookEntries[i].Message) + } + + assert.Equal(t, strings.Join(expLogLines, "\n")+"\n", ts.stdErr.String()) + + var summary map[string]interface{} + require.NoError(t, json.Unmarshal(ts.stdOut.Bytes(), &summary)) + + metrics, ok := summary["metrics"].(map[string]interface{}) + require.True(t, ok) + + teardownCounter, ok := metrics["teardown_counter"].(map[string]interface{}) + require.True(t, ok) + + teardownThresholds, ok := teardownCounter["thresholds"].(map[string]interface{}) + require.True(t, ok) + + expected := map[string]interface{}{"count == 1": map[string]interface{}{"ok": true}} + require.Equal(t, expected, teardownThresholds) +} + // TODO: add a hell of a lot more integration tests, including some that spin up // a test HTTP server and actually check if k6 hits it diff --git a/cmd/integration_tests/eventloop/eventloop_test.go b/cmd/integration_tests/eventloop/eventloop_test.go index febdb751d45..1de4efb3c15 100644 --- a/cmd/integration_tests/eventloop/eventloop_test.go +++ b/cmd/integration_tests/eventloop/eventloop_test.go @@ -15,11 +15,11 @@ import ( "go.k6.io/k6/js/modules" "go.k6.io/k6/lib" "go.k6.io/k6/lib/executor" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" "gopkg.in/guregu/null.v3" ) @@ -51,11 +51,11 @@ func eventLoopTest(t *testing.T, script []byte, testHandle func(context.Context, lib.Options{ TeardownTimeout: types.NullDurationFrom(time.Second), SetupTimeout: types.NullDurationFrom(time.Second), - }) + }, builtinMetrics) defer cancel() errCh := make(chan error, 1) - go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }() + go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() select { case err := <-errCh: @@ -200,7 +200,7 @@ export default function() { } func newTestExecutionScheduler( - t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, + t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, builtinMetrics *metrics.BuiltinMetrics, ) (ctx context.Context, cancel func(), execScheduler *local.ExecutionScheduler, samples chan stats.SampleContainer) { if runner == nil { runner = &minirunner.MiniRunner{} @@ -219,7 +219,7 @@ func newTestExecutionScheduler( logger.SetOutput(testutils.NewTestOutput(t)) } - execScheduler, err = local.NewExecutionScheduler(runner, logger) + execScheduler, err = local.NewExecutionScheduler(runner, builtinMetrics, logger) require.NoError(t, err) samples = make(chan stats.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) diff --git a/cmd/run.go b/cmd/run.go index 82222f5653b..27f8555277f 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -89,7 +89,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger := c.gs.logger // Create a local execution scheduler wrapping the runner. 
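
This hunk is where the new plumbing first becomes visible to callers: local.NewExecutionScheduler now takes the built-in metrics along with the runner, because the scheduler emits the vus/vus_max samples itself (see the core/local/local.go changes below), and core.NewEngine takes the whole metrics registry so it can construct its internal MetricsEngine. A minimal sketch of the resulting construction order; the buildEngine helper is illustrative, not part of this change:

```go
import (
	"github.com/sirupsen/logrus"

	"go.k6.io/k6/core"
	"go.k6.io/k6/core/local"
	"go.k6.io/k6/lib"
	"go.k6.io/k6/metrics"
	"go.k6.io/k6/output"
)

// buildEngine shows the order the pieces now have to be created in: the
// registry and built-in metrics must exist before the scheduler.
func buildEngine(
	runner lib.Runner, opts lib.Options, rtOpts lib.RuntimeOptions,
	outputs []output.Output, logger *logrus.Logger,
) (*core.Engine, error) {
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)

	// The scheduler receives the built-in metrics at construction time,
	// instead of through its Run() method as before.
	execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger)
	if err != nil {
		return nil, err
	}

	// The engine gets the whole registry, not just the built-in metrics.
	return core.NewEngine(execScheduler, opts, rtOpts, outputs, logger, registry)
}
```
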
logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler(test.initRunner, logger) + execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.builtInMetrics, logger) if err != nil { return err } @@ -119,11 +119,15 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { return err } + // TODO: create a MetricsEngine here and add its ingester to the list of + // outputs (unless both NoThresholds and NoSummary were enabled) + + // TODO: remove this completely // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) engine, err := core.NewEngine( execScheduler, conf.Options, test.runtimeOptions, - outputs, logger, test.builtInMetrics, + outputs, logger, test.metricsRegistry, ) if err != nil { return err @@ -134,6 +138,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { initBar.Modify(pb.WithConstProgress(0, "Init API server")) go func() { logger.Debugf("Starting the REST API server on %s", c.gs.flags.address) + // TODO: send the ExecutionState and MetricsEngine instead of the Engine if aerr := api.ListenAndServe(c.gs.flags.address, engine, logger); aerr != nil { // Only exit k6 if the user has explicitly set the REST API address if cmd.Flags().Lookup("address").Changed { @@ -148,11 +153,12 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // We do this here so we can get any output URLs below. initBar.Modify(pb.WithConstProgress(0, "Starting outputs")) - err = engine.StartOutputs() + // TODO: directly create the MutputManager here, not in the Engine + err = engine.OutputManager.StartOutputs() if err != nil { return err } - defer engine.StopOutputs() + defer engine.OutputManager.StopOutputs() printExecutionDescription( c.gs, "local", args[0], "", conf, execScheduler.GetState().ExecutionTuple, executionPlan, outputs, @@ -163,11 +169,11 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.WithField("sig", sig).Debug("Stopping k6 in response to signal...") lingerCancel() // stop the test run, metric processing is cancelled below } - hardStop := func(sig os.Signal) { + onHardStop := func(sig os.Signal) { logger.WithField("sig", sig).Error("Aborting k6 in response to signal") globalCancel() // not that it matters, given the following command... } - stopSignalHandling := handleTestAbortSignals(c.gs, gracefulStop, hardStop) + stopSignalHandling := handleTestAbortSignals(c.gs, gracefulStop, onHardStop) defer stopSignalHandling() // Initialize the engine @@ -225,9 +231,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Handle the end-of-test summary. 
if !test.runtimeOptions.NoSummary.Bool { + engine.MetricsEngine.MetricsLock.Lock() // TODO: refactor so this is not needed summaryResult, err := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ - Metrics: engine.Metrics, - RootGroup: engine.ExecutionScheduler.GetRunner().GetDefaultGroup(), + Metrics: engine.MetricsEngine.ObservedMetrics, + RootGroup: execScheduler.GetRunner().GetDefaultGroup(), TestRunDuration: executionState.GetCurrentTestRunDuration(), NoColor: c.gs.flags.noColor, UIState: lib.UIState{ @@ -235,6 +242,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { IsStdErrTTY: c.gs.stdErr.isTTY, }, }) + engine.MetricsEngine.MetricsLock.Unlock() if err == nil { err = handleSummaryResult(c.gs.fs, c.gs.stdOut, c.gs.stdErr, summaryResult) } diff --git a/cmd/runtime_options_test.go b/cmd/runtime_options_test.go index 823c4e2330f..a5d642fecbf 100644 --- a/cmd/runtime_options_test.go +++ b/cmd/runtime_options_test.go @@ -32,8 +32,8 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" ) type runtimeOptionsTestCase struct { diff --git a/cmd/test_load.go b/cmd/test_load.go index 29d2952dffe..97bc263062a 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -12,8 +12,8 @@ import ( "go.k6.io/k6/errext/exitcodes" "go.k6.io/k6/js" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" ) const ( diff --git a/converter/har/converter_test.go b/converter/har/converter_test.go index c898ba3ba7c..9afee3ae75d 100644 --- a/converter/har/converter_test.go +++ b/converter/har/converter_test.go @@ -29,9 +29,9 @@ import ( "go.k6.io/k6/js" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" ) func TestBuildK6Headers(t *testing.T) { diff --git a/core/engine.go b/core/engine.go index 805f5527ce7..07e9ea8cc00 100644 --- a/core/engine.go +++ b/core/engine.go @@ -23,23 +23,21 @@ package core import ( "context" "errors" - "strings" "sync" "time" "github.com/sirupsen/logrus" - "gopkg.in/guregu/null.v3" "go.k6.io/k6/errext" "go.k6.io/k6/js/common" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" + "go.k6.io/k6/metrics/engine" "go.k6.io/k6/output" "go.k6.io/k6/stats" ) const ( - metricsRate = 1 * time.Second collectRate = 50 * time.Millisecond thresholdsRate = 2 * time.Second ) @@ -54,35 +52,31 @@ type Engine struct { // expects to be able to get information from the Engine and is initialized // before the Init() call... + // TODO: completely remove the engine and use all of these separately, in a + // much more composable and testable manner ExecutionScheduler lib.ExecutionScheduler - executionState *lib.ExecutionState + MetricsEngine *engine.MetricsEngine + OutputManager *output.Manager - Options lib.Options runtimeOptions lib.RuntimeOptions - outputs []output.Output + + ingester output.Output logger *logrus.Entry stopOnce sync.Once stopChan chan struct{} - Metrics map[string]*stats.Metric - MetricsLock sync.Mutex - - builtinMetrics *metrics.BuiltinMetrics - Samples chan stats.SampleContainer - - // Assigned to metrics upon first received sample. - thresholds map[string]stats.Thresholds - submetrics map[string][]*stats.Submetric + Samples chan stats.SampleContainer // Are thresholds tainted? - thresholdsTainted bool + thresholdsTaintedLock sync.Mutex + thresholdsTainted bool } // NewEngine instantiates a new Engine, without doing any heavy initialization. 
func NewEngine( ex lib.ExecutionScheduler, opts lib.Options, rtOpts lib.RuntimeOptions, outputs []output.Output, logger *logrus.Logger, - builtinMetrics *metrics.BuiltinMetrics, + registry *metrics.Registry, ) (*Engine, error) { if ex == nil { return nil, errors.New("missing ExecutionScheduler instance") @@ -90,83 +84,32 @@ func NewEngine( e := &Engine{ ExecutionScheduler: ex, - executionState: ex.GetState(), - Options: opts, runtimeOptions: rtOpts, - outputs: outputs, - Metrics: make(map[string]*stats.Metric), Samples: make(chan stats.SampleContainer, opts.MetricSamplesBufferSize.Int64), stopChan: make(chan struct{}), logger: logger.WithField("component", "engine"), - builtinMetrics: builtinMetrics, } - e.thresholds = opts.Thresholds - e.submetrics = make(map[string][]*stats.Submetric) - for name := range e.thresholds { - if !strings.Contains(name, "{") { - continue - } - - parent, sm := stats.NewSubmetric(name) - e.submetrics[parent] = append(e.submetrics[parent], sm) + me, err := engine.NewMetricsEngine(registry, ex.GetState(), opts, rtOpts, logger) + if err != nil { + return nil, err } + e.MetricsEngine = me - // TODO: refactor this out of here when https://github.com/k6io/k6/issues/1832 lands and - // there is a better way to enable a metric with tag - if opts.SystemTags.Has(stats.TagExpectedResponse) { - for _, name := range []string{ - "http_req_duration{expected_response:true}", - } { - if _, ok := e.thresholds[name]; ok { - continue - } - parent, sm := stats.NewSubmetric(name) - e.submetrics[parent] = append(e.submetrics[parent], sm) - } + if !(rtOpts.NoSummary.Bool && rtOpts.NoThresholds.Bool) { + e.ingester = me.GetIngester() + outputs = append(outputs, e.ingester) } - return e, nil -} - -// StartOutputs spins up all configured outputs, giving the thresholds to any -// that can accept them. And if some output fails, stop the already started -// ones. This may take some time, since some outputs make initial network -// requests to set up whatever remote services are going to listen to them. -// -// TODO: this doesn't really need to be in the Engine, so take it out? -func (e *Engine) StartOutputs() error { - e.logger.Debugf("Starting %d outputs...", len(e.outputs)) - for i, out := range e.outputs { - if stopOut, ok := out.(output.WithTestRunStop); ok { - stopOut.SetTestRunStopCallback( - func(err error) { - e.logger.WithError(err).Error("Received error to stop from output") - e.Stop() - }) - } - - if err := out.Start(); err != nil { - e.stopOutputs(i) - return err + e.OutputManager = output.NewManager(outputs, logger, func(err error) { + if err != nil { + logger.WithError(err).Error("Received error to stop from output") } - } - return nil -} - -// StopOutputs stops all configured outputs. 
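
StartOutputs and StopOutputs are removed from the Engine in the hunks here; the equivalent logic now lives in output.Manager, which NewEngine constructs above with a callback that stops the test when an output reports a fatal error. Caller-side usage, roughly (a sketch that assumes engine, sampleContainers, and an error return are in scope):

```go
// Outputs are started and stopped through the manager, not the Engine.
if err := engine.OutputManager.StartOutputs(); err != nil {
	return err
}
defer engine.OutputManager.StopOutputs()

// Samples and run-status updates flow through the manager as well:
engine.OutputManager.AddMetricSamples(sampleContainers)
engine.OutputManager.SetRunStatus(lib.RunStatusFinished)
```
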
-func (e *Engine) StopOutputs() { - e.stopOutputs(len(e.outputs)) -} + e.Stop() + }) -func (e *Engine) stopOutputs(upToID int) { - e.logger.Debugf("Stopping %d outputs...", upToID) - for i := 0; i < upToID; i++ { - if err := e.outputs[i].Stop(); err != nil { - e.logger.WithError(err).Errorf("Stopping output %d failed", i) - } - } + return e, nil } // Init is used to initialize the execution scheduler and all metrics processing @@ -198,7 +141,7 @@ func (e *Engine) Init(globalCtx, runCtx context.Context) (run func() error, wait processMetricsAfterRun := make(chan struct{}) runFn := func() error { e.logger.Debug("Execution scheduler starting...") - err := e.ExecutionScheduler.Run(globalCtx, runSubCtx, e.Samples, e.builtinMetrics) + err := e.ExecutionScheduler.Run(globalCtx, runSubCtx, e.Samples) e.logger.WithError(err).Debug("Execution scheduler terminated") select { @@ -236,16 +179,6 @@ func (e *Engine) startBackgroundProcesses( e.processMetrics(globalCtx, processMetricsAfterRun) }() - // Run VU metrics emission, only while the test is running. - // TODO: move? this seems like something the ExecutionScheduler should emit... - processes.Add(1) - go func() { - defer processes.Done() - e.logger.Debug("Starting emission of VU metrics...") - e.runMetricsEmission(runCtx) - e.logger.Debug("Metrics emission terminated") - }() - // Update the test run status when the test finishes processes.Add(1) thresholdAbortChan := make(chan struct{}) @@ -258,27 +191,27 @@ func (e *Engine) startBackgroundProcesses( var serr errext.Exception switch { case errors.As(err, &serr): - e.setRunStatus(lib.RunStatusAbortedScriptError) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedScriptError) case common.IsInterruptError(err): - e.setRunStatus(lib.RunStatusAbortedUser) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedUser) default: - e.setRunStatus(lib.RunStatusAbortedSystem) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedSystem) } } else { e.logger.Debug("run: execution scheduler terminated") - e.setRunStatus(lib.RunStatusFinished) + e.OutputManager.SetRunStatus(lib.RunStatusFinished) } case <-runCtx.Done(): e.logger.Debug("run: context expired; exiting...") - e.setRunStatus(lib.RunStatusAbortedUser) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedUser) case <-e.stopChan: runSubCancel() e.logger.Debug("run: stopped by user; exiting...") - e.setRunStatus(lib.RunStatusAbortedUser) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedUser) case <-thresholdAbortChan: e.logger.Debug("run: stopped by thresholds; exiting...") runSubCancel() - e.setRunStatus(lib.RunStatusAbortedThreshold) + e.OutputManager.SetRunStatus(lib.RunStatusAbortedThreshold) } }() @@ -294,7 +227,11 @@ func (e *Engine) startBackgroundProcesses( for { select { case <-ticker.C: - if e.processThresholds() { + thresholdsTainted, shouldAbort := e.MetricsEngine.EvaluateThresholds() + e.thresholdsTaintedLock.Lock() + e.thresholdsTainted = thresholdsTainted + e.thresholdsTaintedLock.Unlock() + if shouldAbort { close(thresholdAbortChan) return } @@ -320,10 +257,14 @@ func (e *Engine) processMetrics(globalCtx context.Context, processMetricsAfterRu for sc := range e.Samples { sampleContainers = append(sampleContainers, sc) } - e.processSamples(sampleContainers) + e.OutputManager.AddMetricSamples(sampleContainers) if !e.runtimeOptions.NoThresholds.Bool { - e.processThresholds() // Process the thresholds one final time + // Process the thresholds one final time + thresholdsTainted, _ := e.MetricsEngine.EvaluateThresholds() + e.thresholdsTaintedLock.Lock() 
+ e.thresholdsTainted = thresholdsTainted + e.thresholdsTaintedLock.Unlock() } }() @@ -333,7 +274,7 @@ func (e *Engine) processMetrics(globalCtx context.Context, processMetricsAfterRu e.logger.Debug("Metrics processing started...") processSamples := func() { if len(sampleContainers) > 0 { - e.processSamples(sampleContainers) + e.OutputManager.AddMetricSamples(sampleContainers) // Make the new container with the same size as the previous // one, assuming that we produce roughly the same amount of // metrics data between ticks... @@ -357,7 +298,12 @@ func (e *Engine) processMetrics(globalCtx context.Context, processMetricsAfterRu e.logger.Debug("Processing metrics and thresholds after the test run has ended...") processSamples() if !e.runtimeOptions.NoThresholds.Bool { - e.processThresholds() + // Ensure the ingester flushes any buffered metrics + _ = e.ingester.Stop() + thresholdsTainted, _ := e.MetricsEngine.EvaluateThresholds() + e.thresholdsTaintedLock.Lock() + e.thresholdsTainted = thresholdsTainted + e.thresholdsTaintedLock.Unlock() } processMetricsAfterRun <- struct{}{} @@ -369,15 +315,9 @@ func (e *Engine) processMetrics(globalCtx context.Context, processMetricsAfterRu } } -func (e *Engine) setRunStatus(status lib.RunStatus) { - for _, out := range e.outputs { - if statUpdOut, ok := out.(output.WithRunStatusUpdates); ok { - statUpdOut.SetRunStatus(status) - } - } -} - func (e *Engine) IsTainted() bool { + e.thresholdsTaintedLock.Lock() + defer e.thresholdsTaintedLock.Unlock() return e.thresholdsTainted } @@ -397,125 +337,3 @@ func (e *Engine) IsStopped() bool { return false } } - -func (e *Engine) runMetricsEmission(ctx context.Context) { - ticker := time.NewTicker(metricsRate) - for { - select { - case <-ticker.C: - e.emitMetrics() - case <-ctx.Done(): - return - } - } -} - -func (e *Engine) emitMetrics() { - t := time.Now() - - executionState := e.ExecutionScheduler.GetState() - // TODO: optimize and move this, it shouldn't call processSamples() directly - e.processSamples([]stats.SampleContainer{stats.ConnectedSamples{ - Samples: []stats.Sample{ - { - Time: t, - Metric: e.builtinMetrics.VUs, - Value: float64(executionState.GetCurrentlyActiveVUsCount()), - Tags: e.Options.RunTags, - }, { - Time: t, - Metric: e.builtinMetrics.VUsMax, - Value: float64(executionState.GetInitializedVUsCount()), - Tags: e.Options.RunTags, - }, - }, - Tags: e.Options.RunTags, - Time: t, - }}) -} - -func (e *Engine) processThresholds() (shouldAbort bool) { - e.MetricsLock.Lock() - defer e.MetricsLock.Unlock() - - t := e.executionState.GetCurrentTestRunDuration() - - e.thresholdsTainted = false - for _, m := range e.Metrics { - if len(m.Thresholds.Thresholds) == 0 { - continue - } - m.Tainted = null.BoolFrom(false) - - e.logger.WithField("m", m.Name).Debug("running thresholds") - succ, err := m.Thresholds.Run(m.Sink, t) - if err != nil { - e.logger.WithField("m", m.Name).WithError(err).Error("Threshold error") - continue - } - if !succ { - e.logger.WithField("m", m.Name).Debug("Thresholds failed") - m.Tainted = null.BoolFrom(true) - e.thresholdsTainted = true - if m.Thresholds.Abort { - shouldAbort = true - } - } - } - - return shouldAbort -} - -func (e *Engine) processSamplesForMetrics(sampleContainers []stats.SampleContainer) { - for _, sampleContainer := range sampleContainers { - samples := sampleContainer.GetSamples() - - if len(samples) == 0 { - continue - } - - for _, sample := range samples { - m, ok := e.Metrics[sample.Metric.Name] - if !ok { - m = stats.New(sample.Metric.Name, sample.Metric.Type, 
sample.Metric.Contains) - m.Thresholds = e.thresholds[m.Name] - m.Submetrics = e.submetrics[m.Name] - e.Metrics[m.Name] = m - } - m.Sink.Add(sample) - - for _, sm := range m.Submetrics { - if !sample.Tags.Contains(sm.Tags) { - continue - } - - if sm.Metric == nil { - sm.Metric = stats.New(sm.Name, sample.Metric.Type, sample.Metric.Contains) - sm.Metric.Sub = *sm - sm.Metric.Thresholds = e.thresholds[sm.Name] - e.Metrics[sm.Name] = sm.Metric - } - sm.Metric.Sink.Add(sample) - } - } - } -} - -func (e *Engine) processSamples(sampleContainers []stats.SampleContainer) { - if len(sampleContainers) == 0 { - return - } - - // TODO: optimize this... - e.MetricsLock.Lock() - defer e.MetricsLock.Unlock() - - // TODO: run this and the below code in goroutines? - if !(e.runtimeOptions.NoSummary.Bool && e.runtimeOptions.NoThresholds.Bool) { - e.processSamplesForMetrics(sampleContainers) - } - - for _, out := range e.outputs { - out.AddMetricSamples(sampleContainers) - } -} diff --git a/core/engine_test.go b/core/engine_test.go index 685faa94d18..7f45d4bf19d 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -39,22 +39,25 @@ import ( "go.k6.io/k6/js" "go.k6.io/k6/lib" "go.k6.io/k6/lib/executor" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/httpmultibin" "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/testutils/mockoutput" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" "go.k6.io/k6/output" "go.k6.io/k6/stats" ) const isWindows = runtime.GOOS == "windows" +// TODO: completely rewrite all of these tests + // Wrapper around NewEngine that applies a logger and manages the options. -func newTestEngine( //nolint:golint +func newTestEngineWithRegistry( //nolint:golint t *testing.T, runCtx context.Context, runner lib.Runner, outputs []output.Output, opts lib.Options, + registry *metrics.Registry, ) (engine *Engine, run func() error, wait func()) { if runner == nil { runner = &minirunner.MiniRunner{} @@ -75,13 +78,13 @@ func newTestEngine( //nolint:golint require.NoError(t, runner.SetOptions(newOpts)) - execScheduler, err := local.NewExecutionScheduler(runner, logger) + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - engine, err = NewEngine(execScheduler, opts, lib.RuntimeOptions{}, outputs, logger, builtinMetrics) + engine, err = NewEngine(execScheduler, opts, lib.RuntimeOptions{}, outputs, logger, registry) require.NoError(t, err) + require.NoError(t, engine.OutputManager.StartOutputs()) run, waitFn, err := engine.Init(globalCtx, runCtx) require.NoError(t, err) @@ -92,9 +95,16 @@ func newTestEngine( //nolint:golint } globalCancel() waitFn() + engine.OutputManager.StopOutputs() } } +func newTestEngine( + t *testing.T, runCtx context.Context, runner lib.Runner, outputs []output.Output, opts lib.Options, //nolint:revive +) (engine *Engine, run func() error, wait func()) { + return newTestEngineWithRegistry(t, runCtx, runner, outputs, opts, metrics.NewRegistry()) +} + func TestNewEngine(t *testing.T) { t.Parallel() newTestEngine(t, nil, nil, nil, lib.Options{}) @@ -139,7 +149,10 @@ func TestEngineRun(t *testing.T) { // Make sure samples are discarded after context close (using "cutoff" timestamp in local.go) t.Run("collects samples", func(t *testing.T) { t.Parallel() - testMetric := stats.New("test_metric", stats.Trend) + + 
registry := metrics.NewRegistry() + testMetric, err := registry.NewMetric("test_metric", stats.Trend) + require.NoError(t, err) signalChan := make(chan interface{}) @@ -155,10 +168,10 @@ func TestEngineRun(t *testing.T) { mockOutput := mockoutput.New() ctx, cancel := context.WithCancel(context.Background()) - _, run, wait := newTestEngine(t, ctx, runner, []output.Output{mockOutput}, lib.Options{ + _, run, wait := newTestEngineWithRegistry(t, ctx, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }) + }, registry) errC := make(chan error) go func() { errC <- run() }() @@ -211,7 +224,10 @@ func TestEngineStopped(t *testing.T) { func TestEngineOutput(t *testing.T) { t.Parallel() - testMetric := stats.New("test_metric", stats.Trend) + + registry := metrics.NewRegistry() + testMetric, err := registry.NewMetric("test_metric", stats.Trend) + require.NoError(t, err) runner := &minirunner.MiniRunner{ Fn: func(ctx context.Context, _ *lib.State, out chan<- stats.SampleContainer) error { @@ -221,10 +237,10 @@ func TestEngineOutput(t *testing.T) { } mockOutput := mockoutput.New() - e, run, wait := newTestEngine(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + e, run, wait := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }) + }, registry) assert.NoError(t, run()) wait() @@ -235,7 +251,7 @@ func TestEngineOutput(t *testing.T) { cSamples = append(cSamples, sample) } } - metric := e.Metrics["test_metric"] + metric := e.MetricsEngine.ObservedMetrics["test_metric"] if assert.NotNil(t, metric) { sink := metric.Sink.(*stats.TrendSink) if assert.NotNil(t, sink) { @@ -248,49 +264,64 @@ func TestEngineOutput(t *testing.T) { func TestEngine_processSamples(t *testing.T) { t.Parallel() - metric := stats.New("my_metric", stats.Gauge) t.Run("metric", func(t *testing.T) { t.Parallel() - e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{}) - defer wait() - e.processSamples( + registry := metrics.NewRegistry() + metric, err := registry.NewMetric("my_metric", stats.Gauge) + require.NoError(t, err) + + e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{}, registry) + + e.OutputManager.AddMetricSamples( []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, ) - assert.IsType(t, &stats.GaugeSink{}, e.Metrics["my_metric"].Sink) + e.Stop() + wait() + + assert.IsType(t, &stats.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric"].Sink) }) t.Run("submetric", func(t *testing.T) { t.Parallel() + + registry := metrics.NewRegistry() + metric, err := registry.NewMetric("my_metric", stats.Gauge) + require.NoError(t, err) + ths := stats.NewThresholds([]string{`value<2`}) gotParseErr := ths.Parse() require.NoError(t, gotParseErr) - e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{ + e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{ Thresholds: map[string]stats.Thresholds{ "my_metric{a:1}": ths, }, - }) - defer wait() - - sms := e.submetrics["my_metric"] - assert.Len(t, sms, 1) - assert.Equal(t, "my_metric{a:1}", sms[0].Name) - assert.EqualValues(t, map[string]string{"a": "1"}, sms[0].Tags.CloneTags()) + }, registry) - e.processSamples( + e.OutputManager.AddMetricSamples( []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1", "b": "2"})}}, ) - assert.IsType(t, &stats.GaugeSink{}, 
e.Metrics["my_metric"].Sink) - assert.IsType(t, &stats.GaugeSink{}, e.Metrics["my_metric{a:1}"].Sink) + e.Stop() + wait() + + assert.Len(t, e.MetricsEngine.ObservedMetrics, 2) + sms := e.MetricsEngine.ObservedMetrics["my_metric{a:1}"] + assert.EqualValues(t, map[string]string{"a": "1"}, sms.Sub.Tags.CloneTags()) + + assert.IsType(t, &stats.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric"].Sink) + assert.IsType(t, &stats.GaugeSink{}, e.MetricsEngine.ObservedMetrics["my_metric{a:1}"].Sink) }) } func TestEngineThresholdsWillAbort(t *testing.T) { t.Parallel() - metric := stats.New("my_metric", stats.Gauge) + + registry := metrics.NewRegistry() + metric, err := registry.NewMetric("my_metric", stats.Gauge) + require.NoError(t, err) // The incoming samples for the metric set it to 1.25. Considering // the metric is of type Gauge, value > 1.25 should always fail, and @@ -302,18 +333,22 @@ func TestEngineThresholdsWillAbort(t *testing.T) { thresholds := map[string]stats.Thresholds{metric.Name: ths} - e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{Thresholds: thresholds}) - defer wait() + e, _, wait := newTestEngineWithRegistry(t, nil, nil, nil, lib.Options{Thresholds: thresholds}, registry) - e.processSamples( + e.OutputManager.AddMetricSamples( []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, ) - assert.True(t, e.processThresholds()) + e.Stop() + wait() + assert.True(t, e.thresholdsTainted) } func TestEngineAbortedByThresholds(t *testing.T) { t.Parallel() - metric := stats.New("my_metric", stats.Gauge) + + registry := metrics.NewRegistry() + metric, err := registry.NewMetric("my_metric", stats.Gauge) + require.NoError(t, err) // The MiniRunner sets the value of the metric to 1.25. 
Considering // the metric is of type Gauge, value > 1.25 should always fail, and @@ -336,7 +371,7 @@ func TestEngineAbortedByThresholds(t *testing.T) { }, } - _, run, wait := newTestEngine(t, nil, runner, nil, lib.Options{Thresholds: thresholds}) + _, run, wait := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry) defer wait() go func() { @@ -353,44 +388,71 @@ func TestEngineAbortedByThresholds(t *testing.T) { func TestEngine_processThresholds(t *testing.T) { t.Parallel() - metric := stats.New("my_metric", stats.Gauge) testdata := map[string]struct { - pass bool - ths map[string][]string - abort bool + pass bool + ths map[string][]string }{ - "passing": {true, map[string][]string{"my_metric": {"value<2"}}, false}, - "failing": {false, map[string][]string{"my_metric": {"value>1.25"}}, false}, - "aborting": {false, map[string][]string{"my_metric": {"value>1.25"}}, true}, - - "submetric,match,passing": {true, map[string][]string{"my_metric{a:1}": {"value<2"}}, false}, - "submetric,match,failing": {false, map[string][]string{"my_metric{a:1}": {"value>1.25"}}, false}, - "submetric,nomatch,passing": {true, map[string][]string{"my_metric{a:2}": {"value<2"}}, false}, - "submetric,nomatch,failing": {true, map[string][]string{"my_metric{a:2}": {"value>1.25"}}, false}, + "passing": {true, map[string][]string{"my_metric": {"value<2"}}}, + "failing": {false, map[string][]string{"my_metric": {"value>1.25"}}}, + + "submetric,match,passing": {true, map[string][]string{"my_metric{a:1}": {"value<2"}}}, + "submetric,match,failing": {false, map[string][]string{"my_metric{a:1}": {"value>1.25"}}}, + "submetric,nomatch,passing": {true, map[string][]string{"my_metric{a:2}": {"value<2"}}}, + "submetric,nomatch,failing": {false, map[string][]string{"my_metric{a:2}": {"value>1.25"}}}, + + "unused,passing": {true, map[string][]string{"unused_counter": {"count==0"}}}, + "unused,failing": {false, map[string][]string{"unused_counter": {"count>1"}}}, + "unused,subm,passing": {true, map[string][]string{"unused_counter{a:2}": {"count<1"}}}, + "unused,subm,failing": {false, map[string][]string{"unused_counter{a:2}": {"count>1"}}}, + + "used,passing": {true, map[string][]string{"used_counter": {"count==2"}}}, + "used,failing": {false, map[string][]string{"used_counter": {"count<1"}}}, + "used,subm,passing": {true, map[string][]string{"used_counter{b:1}": {"count==2"}}}, + "used,not-subm,passing": {true, map[string][]string{"used_counter{b:2}": {"count==0"}}}, + "used,invalid-subm,passing1": {true, map[string][]string{"used_counter{c:''}": {"count==0"}}}, + "used,invalid-subm,failing1": {false, map[string][]string{"used_counter{c:''}": {"count>0"}}}, + "used,invalid-subm,passing2": {true, map[string][]string{"used_counter{c:}": {"count==0"}}}, + "used,invalid-subm,failing2": {false, map[string][]string{"used_counter{c:}": {"count>0"}}}, } for name, data := range testdata { name, data := name, data t.Run(name, func(t *testing.T) { t.Parallel() + + registry := metrics.NewRegistry() + gaugeMetric, err := registry.NewMetric("my_metric", stats.Gauge) + require.NoError(t, err) + counterMetric, err := registry.NewMetric("used_counter", stats.Counter) + require.NoError(t, err) + _, err = registry.NewMetric("unused_counter", stats.Counter) + require.NoError(t, err) + thresholds := make(map[string]stats.Thresholds, len(data.ths)) for m, srcs := range data.ths { ths := stats.NewThresholds(srcs) gotParseErr := ths.Parse() require.NoError(t, gotParseErr) - ths.Thresholds[0].AbortOnFail = 
data.abort thresholds[m] = ths } - e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{Thresholds: thresholds}) - defer wait() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + e, run, wait := newTestEngineWithRegistry( + t, ctx, &minirunner.MiniRunner{}, nil, lib.Options{Thresholds: thresholds}, registry, + ) - e.processSamples( - []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, + e.OutputManager.AddMetricSamples( + []stats.SampleContainer{ + stats.Sample{Metric: gaugeMetric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}, + stats.Sample{Metric: counterMetric, Value: 2, Tags: stats.IntoSampleTags(&map[string]string{"b": "1"})}, + }, ) - assert.Equal(t, data.abort, e.processThresholds()) + require.NoError(t, run()) + wait() + assert.Equal(t, data.pass, !e.IsTainted()) }) } @@ -843,9 +905,9 @@ func TestVuInitException(t *testing.T) { require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, logger) + execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, nil, logger, builtinMetrics) + engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1128,7 +1190,10 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { t.Parallel() - testMetric := stats.New("teardown_metric", stats.Counter) + + registry := metrics.NewRegistry() + testMetric, err := registry.NewMetric("teardown_metric", stats.Counter) + require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1144,9 +1209,9 @@ func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { } mockOutput := mockoutput.New() - _, run, wait := newTestEngine(t, ctx, runner, []output.Output{mockOutput}, lib.Options{ + _, run, wait := newTestEngineWithRegistry(t, ctx, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }) + }, registry) assert.NoError(t, run()) wait() @@ -1228,10 +1293,11 @@ func TestActiveVUsCount(t *testing.T) { require.NoError(t, err) require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, logger) + execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, builtinMetrics) + engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, registry) require.NoError(t, err) + require.NoError(t, engine.OutputManager.StartOutputs()) run, waitFn, err := engine.Init(ctx, ctx) // no need for 2 different contexts require.NoError(t, err) @@ -1245,6 +1311,7 @@ func TestActiveVUsCount(t *testing.T) { require.NoError(t, err) cancel() waitFn() + engine.OutputManager.StopOutputs() require.False(t, engine.IsTainted()) } diff --git a/core/local/k6execution_test.go b/core/local/k6execution_test.go index feb1b7938fc..0d80e536f9b 100644 --- a/core/local/k6execution_test.go +++ b/core/local/k6execution_test.go @@ -33,9 +33,9 @@ import ( "github.com/stretchr/testify/require" "go.k6.io/k6/js" 
"go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/loader" + "go.k6.io/k6/metrics" ) func TestExecutionInfoVUSharing(t *testing.T) { @@ -117,7 +117,7 @@ func TestExecutionInfoVUSharing(t *testing.T) { } errCh := make(chan error, 1) - go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }() + go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() select { case err := <-errCh: @@ -216,7 +216,7 @@ func TestExecutionInfoScenarioIter(t *testing.T) { defer cancel() errCh := make(chan error, 1) - go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }() + go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() scStats := map[string]uint64{} @@ -297,7 +297,7 @@ func TestSharedIterationsStable(t *testing.T) { defer cancel() errCh := make(chan error, 1) - go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }() + go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() expIters := [50]int64{} for i := 0; i < 50; i++ { @@ -424,7 +424,7 @@ func TestExecutionInfoAll(t *testing.T) { defer cancel() errCh := make(chan error, 1) - go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }() + go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() select { case err := <-errCh: diff --git a/core/local/local.go b/core/local/local.go index e4bd50a77f6..25f52b1d029 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -33,7 +33,7 @@ import ( "go.k6.io/k6/js/common" "go.k6.io/k6/lib" "go.k6.io/k6/lib/executor" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" "go.k6.io/k6/ui/pb" ) @@ -42,7 +42,7 @@ import ( type ExecutionScheduler struct { runner lib.Runner options lib.Options - logger *logrus.Logger + logger logrus.FieldLogger initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) @@ -51,6 +51,10 @@ type ExecutionScheduler struct { maxDuration time.Duration // cached value derived from the execution plan maxPossibleVUs uint64 // cached value derived from the execution plan state *lib.ExecutionState + + // TODO: remove these when we don't have separate Init() and Run() methods + // and can use a context + a WaitGroup (or something like that) + stopVUsEmission, vusEmissionStopped chan struct{} } // Check to see if we implement the lib.ExecutionScheduler interface @@ -60,7 +64,9 @@ var _ lib.ExecutionScheduler = &ExecutionScheduler{} // instance, without initializing it beyond the bare minimum. Specifically, it // creates the needed executor instances and a lot of state placeholders, but it // doesn't initialize the executors and it doesn't initialize or run VUs. 
-func NewExecutionScheduler(runner lib.Runner, logger *logrus.Logger) (*ExecutionScheduler, error) { +func NewExecutionScheduler( + runner lib.Runner, builtinMetrics *metrics.BuiltinMetrics, logger logrus.FieldLogger, +) (*ExecutionScheduler, error) { options := runner.GetOptions() et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) if err != nil { @@ -70,7 +76,7 @@ func NewExecutionScheduler(runner lib.Runner, logger *logrus.Logger) (*Execution maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(options, et, builtinMetrics, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := options.Scenarios.GetSortedConfigs() @@ -112,6 +118,9 @@ func NewExecutionScheduler(runner lib.Runner, logger *logrus.Logger) (*Execution maxDuration: maxDuration, maxPossibleVUs: maxPossibleVUs, state: executionState, + + stopVUsEmission: make(chan struct{}), + vusEmissionStopped: make(chan struct{}), }, nil } @@ -225,11 +234,58 @@ func (e *ExecutionScheduler) initVUsConcurrently( return doneInits } +func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- stats.SampleContainer) { + e.logger.Debug("Starting emission of VUs and VUsMax metrics...") + + emitMetrics := func() { + t := time.Now() + samples := stats.ConnectedSamples{ + Samples: []stats.Sample{ + { + Time: t, + Metric: e.state.BuiltinMetrics.VUs, + Value: float64(e.state.GetCurrentlyActiveVUsCount()), + Tags: e.options.RunTags, + }, { + Time: t, + Metric: e.state.BuiltinMetrics.VUsMax, + Value: float64(e.state.GetInitializedVUsCount()), + Tags: e.options.RunTags, + }, + }, + Tags: e.options.RunTags, + Time: t, + } + stats.PushIfNotDone(ctx, out, samples) + } + + ticker := time.NewTicker(1 * time.Second) + go func() { + defer func() { + ticker.Stop() + e.logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + close(e.vusEmissionStopped) + }() + + for { + select { + case <-ticker.C: + emitMetrics() + case <-ctx.Done(): + return + case <-e.stopVUsEmission: + return + } + } + }() +} + // Init concurrently initializes all of the planned VUs and then sequentially // initializes all of the configured executors. func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- stats.SampleContainer) error { - logger := e.logger.WithField("phase", "local-execution-scheduler-init") + e.emitVUsAndVUsMax(ctx, samplesOut) + logger := e.logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": vusToInitialize, @@ -293,7 +349,6 @@ func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- stats.S // method. 
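
The stopVUsEmission / vusEmissionStopped pair introduced above is a small stop-and-confirm handshake: Run() closes the first channel to ask the emission goroutine to exit, then blocks on the second until the goroutine's deferred close confirms that it has, which guarantees nothing is pushed to the samples channel after Run() returns. The idiom in isolation, with illustrative names that are not from this change:

```go
package emission

import "time"

// startEmitter calls emit on a one-second ticker until the returned stop
// function is invoked; stop only returns once the goroutine has exited.
func startEmitter(emit func()) (stop func()) {
	stopCh := make(chan struct{})
	stoppedCh := make(chan struct{})

	go func() {
		defer close(stoppedCh) // confirm that the goroutine is done
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				emit()
			case <-stopCh:
				return
			}
		}
	}()

	return func() {
		close(stopCh) // request shutdown...
		<-stoppedCh   // ...and wait until it has actually happened
	}
}
```
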
 func (e *ExecutionScheduler) runExecutor(
 	runCtx context.Context, runResults chan<- error, engineOut chan<- stats.SampleContainer, executor lib.Executor,
-	builtinMetrics *metrics.BuiltinMetrics,
 ) {
 	executorConfig := executor.GetConfig()
 	executorStartTime := executorConfig.GetStartTime()
@@ -330,7 +385,7 @@ func (e *ExecutionScheduler) runExecutor(
 		pb.WithConstProgress(0, "started"),
 	)
 	executorLogger.Debugf("Starting executor")
-	err := executor.Run(runCtx, engineOut, builtinMetrics) // executor should handle context cancel itself
+	err := executor.Run(runCtx, engineOut) // executor should handle context cancel itself
 	if err == nil {
 		executorLogger.Debugf("Executor finished successfully")
 	} else {
@@ -341,10 +396,13 @@ func (e *ExecutionScheduler) runExecutor(

 // Run the ExecutionScheduler, funneling all generated metric samples through the supplied
 // out channel.
-//nolint:cyclop
-func (e *ExecutionScheduler) Run(
-	globalCtx, runCtx context.Context, engineOut chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics,
-) error {
+//nolint:funlen
+func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut chan<- stats.SampleContainer) error {
+	defer func() {
+		close(e.stopVUsEmission)
+		<-e.vusEmissionStopped
+	}()
+
 	executorsCount := len(e.executors)
 	logger := e.logger.WithField("phase", "local-execution-scheduler-run")
 	e.initProgress.Modify(pb.WithConstLeft("Run"))
@@ -401,7 +459,7 @@ func (e *ExecutionScheduler) Run(
 	// This is for addressing test.abort().
 	execCtx := executor.Context(runSubCtx)
 	for _, exec := range e.executors {
-		go e.runExecutor(execCtx, runResults, engineOut, exec, builtinMetrics)
+		go e.runExecutor(execCtx, runResults, engineOut, exec)
 	}

 	// Wait for all executors to finish
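The emitVUsAndVUsMax goroutine above, together with the deferred close/wait in Run, is a standard Go shutdown handshake: one channel requests that the background ticker goroutine stop, and a second channel confirms that it actually has exited. A minimal, self-contained sketch of the same pattern (all names here are illustrative, not k6 APIs):

package main

import (
	"context"
	"fmt"
	"time"
)

type emitter struct {
	stop    chan struct{} // closed by the owner to request shutdown
	stopped chan struct{} // closed by the goroutine once it has exited
}

func newEmitter() *emitter {
	return &emitter{stop: make(chan struct{}), stopped: make(chan struct{})}
}

func (e *emitter) start(ctx context.Context) {
	ticker := time.NewTicker(1 * time.Second)
	go func() {
		defer func() {
			ticker.Stop()
			close(e.stopped) // confirm shutdown to anyone waiting
		}()
		for {
			select {
			case t := <-ticker.C:
				fmt.Println("emit sample at", t) // stands in for pushing metric samples
			case <-ctx.Done():
				return
			case <-e.stop:
				return
			}
		}
	}()
}

func main() {
	e := newEmitter()
	e.start(context.Background())
	time.Sleep(2500 * time.Millisecond)
	close(e.stop) // request shutdown...
	<-e.stopped   // ...and wait until the ticker goroutine is really gone
}

Waiting on the second channel is what guarantees no sample is pushed to the output channel after Run returns, which is why the deferred block in Run above both closes stopVUsEmission and then blocks on vusEmissionStopped.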
diff --git a/core/local/local_test.go b/core/local/local_test.go
index 33c04a72382..496a3c31135 100644
--- a/core/local/local_test.go
+++ b/core/local/local_test.go
@@ -42,7 +42,6 @@ import (
 	"go.k6.io/k6/js"
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/executor"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/netext"
 	"go.k6.io/k6/lib/netext/httpext"
 	"go.k6.io/k6/lib/testutils"
@@ -51,6 +50,7 @@ import (
 	"go.k6.io/k6/lib/testutils/mockresolver"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )

@@ -73,8 +73,10 @@ func newTestExecutionScheduler(
 		logger = logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))
 	}
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)

-	execScheduler, err = NewExecutionScheduler(runner, logger)
+	execScheduler, err = NewExecutionScheduler(runner, builtinMetrics, logger)
 	require.NoError(t, err)

 	samples = make(chan stats.SampleContainer, newOpts.MetricSamplesBufferSize.Int64)
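The test updates that follow are mechanical but order-sensitive: the metrics registry and the built-in metrics now have to exist before the scheduler is constructed, since the scheduler itself emits VUs/VUsMax samples. A short sketch of the new setup order, using only the signatures visible in this diff (the wrapper function is illustrative):

package example

import (
	"github.com/sirupsen/logrus"

	"go.k6.io/k6/core/local"
	"go.k6.io/k6/lib"
	"go.k6.io/k6/metrics"
)

// newScheduler shows the post-refactor construction order:
// registry -> builtinMetrics -> execution scheduler.
func newScheduler(runner lib.Runner, logger logrus.FieldLogger) (*local.ExecutionScheduler, error) {
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	return local.NewExecutionScheduler(runner, builtinMetrics, logger)
}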
@@ -99,9 +101,7 @@ func TestExecutionSchedulerRun(t *testing.T) {
 	defer cancel()

 	err := make(chan error, 1)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	go func() { err <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }()
+	go func() { err <- execScheduler.Run(ctx, ctx, samples) }()
 	assert.NoError(t, <-err)
 }

@@ -140,7 +140,7 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) {
 				nil, lib.RuntimeOptions{}, builtinMetrics, registry)
 			require.NoError(t, err)

-			execScheduler, err := NewExecutionScheduler(runner, logger)
+			execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 			require.NoError(t, err)

 			ctx, cancel := context.WithCancel(context.Background())
@@ -154,7 +154,7 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) {
 					assert.EqualError(t, err, tc.expErr)
 				} else {
 					assert.NoError(t, err)
-					assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+					assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 				}
 				close(done)
 			}()
@@ -252,7 +252,7 @@ func TestExecutionSchedulerRunEnv(t *testing.T) {
 				nil, lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}}, builtinMetrics, registry)
 			require.NoError(t, err)

-			execScheduler, err := NewExecutionScheduler(runner, logger)
+			execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 			require.NoError(t, err)

 			ctx, cancel := context.WithCancel(context.Background())
@@ -262,7 +262,7 @@ func TestExecutionSchedulerRunEnv(t *testing.T) {
 			samples := make(chan stats.SampleContainer)
 			go func() {
 				assert.NoError(t, execScheduler.Init(ctx, samples))
-				assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+				assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 				close(done)
 			}()
 			for {
@@ -323,7 +323,7 @@ func TestExecutionSchedulerSystemTags(t *testing.T) {
 		SystemTags: &stats.DefaultSystemTagSet,
 	})))

-	execScheduler, err := NewExecutionScheduler(runner, logger)
+	execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 	require.NoError(t, err)

 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
@@ -334,7 +334,7 @@ func TestExecutionSchedulerSystemTags(t *testing.T) {
 	go func() {
 		defer close(done)
 		require.NoError(t, execScheduler.Init(ctx, samples))
-		require.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+		require.NoError(t, execScheduler.Run(ctx, ctx, samples))
 	}()

 	expCommonTrailTags := stats.IntoSampleTags(&map[string]string{
@@ -460,7 +460,7 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) {
 				nil, lib.RuntimeOptions{}, builtinMetrics, registry)
 			require.NoError(t, err)

-			execScheduler, err := NewExecutionScheduler(runner, logger)
+			execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 			require.NoError(t, err)

 			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
@@ -471,7 +471,7 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) {
 			go func() {
 				defer close(done)
 				require.NoError(t, execScheduler.Init(ctx, samples))
-				require.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+				require.NoError(t, execScheduler.Run(ctx, ctx, samples))
 			}()
 			var gotTrailTag, gotNetTrailTag bool
 			for {
@@ -623,7 +623,7 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) {
 		nil, lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}}, builtinMetrics, registry)
 	require.NoError(t, err)

-	execScheduler, err := NewExecutionScheduler(runner, logger)
+	execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 	require.NoError(t, err)

 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
@@ -632,7 +632,7 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) {
 	samples := make(chan stats.SampleContainer)
 	go func() {
 		assert.NoError(t, execScheduler.Init(ctx, samples))
-		assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+		assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 		close(samples)
 	}()

@@ -694,8 +694,6 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) {
 func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 	t.Parallel()
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
 	t.Run("Normal", func(t *testing.T) {
 		t.Parallel()
 		setupC := make(chan struct{})
@@ -713,7 +711,7 @@ func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 		ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})

 		err := make(chan error, 1)
-		go func() { err <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }()
+		go func() { err <- execScheduler.Run(ctx, ctx, samples) }()
 		defer cancel()
 		<-setupC
 		<-teardownC
@@ -728,7 +726,7 @@ func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 		}
 		ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})
 		defer cancel()
-		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics), "setup error")
+		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "setup error")
 	})
 	t.Run("Don't Run Setup", func(t *testing.T) {
 		t.Parallel()
@@ -746,7 +744,7 @@ func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 			Iterations: null.IntFrom(1),
 		})
 		defer cancel()
-		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics), "teardown error")
+		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
 	})

 	t.Run("Teardown Error", func(t *testing.T) {
@@ -765,7 +763,7 @@ func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 		})
 		defer cancel()

-		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics), "teardown error")
+		assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
 	})
 	t.Run("Don't Run Teardown", func(t *testing.T) {
 		t.Parallel()
@@ -783,7 +781,7 @@ func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
 			Iterations: null.IntFrom(1),
 		})
 		defer cancel()
-		assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+		assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 	})
 }

@@ -812,8 +810,6 @@ func TestExecutionSchedulerStages(t *testing.T) {
 			},
 		},
 	}
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)

 	for name, data := range testdata {
 		data := data
@@ -830,7 +826,7 @@ func TestExecutionSchedulerStages(t *testing.T) {
 				Stages: data.Stages,
 			})
 			defer cancel()
-			assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+			assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 			assert.True(t, execScheduler.GetState().GetCurrentTestRunDuration() >= data.Duration)
 		})
 	}
@@ -855,9 +851,7 @@ func TestExecutionSchedulerEndTime(t *testing.T) {
 	assert.True(t, isFinal)

 	startTime := time.Now()
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+	assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 	runTime := time.Since(startTime)
 	assert.True(t, runTime > 1*time.Second, "test did not take 1s")
 	assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")
@@ -883,10 +877,8 @@ func TestExecutionSchedulerRuntimeErrors(t *testing.T) {
 	assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop
 	assert.True(t, isFinal)

-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
 	startTime := time.Now()
-	assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+	assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 	runTime := time.Since(startTime)
 	assert.True(t, runTime > 1*time.Second, "test did not take 1s")
 	assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")
@@ -922,10 +914,8 @@ func TestExecutionSchedulerEndErrors(t *testing.T) {
 	assert.Equal(t, 1*time.Second, endTime) // because of the 0s gracefulStop
 	assert.True(t, isFinal)

-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
 	startTime := time.Now()
-	assert.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+	assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
 	runTime := time.Since(startTime)
 	assert.True(t, runTime > 1*time.Second, "test did not take 1s")
 	assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")
@@ -964,14 +954,14 @@ func TestExecutionSchedulerEndIterations(t *testing.T) {
 	logger := logrus.New()
 	logger.SetOutput(testutils.NewTestOutput(t))

-	execScheduler, err := NewExecutionScheduler(runner, logger)
-	require.NoError(t, err)
-
 	registry := metrics.NewRegistry()
 	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
+	require.NoError(t, err)
+
 	samples := make(chan stats.SampleContainer, 300)
 	require.NoError(t, execScheduler.Init(ctx, samples))
-	require.NoError(t, execScheduler.Run(ctx, ctx, samples, builtinMetrics))
+	require.NoError(t, execScheduler.Run(ctx, ctx, samples))

 	assert.Equal(t, uint64(100), execScheduler.GetState().GetFullIterationCount())
 	assert.Equal(t, uint64(0), execScheduler.GetState().GetPartialIterationCount())
@@ -996,9 +986,7 @@ func TestExecutionSchedulerIsRunning(t *testing.T) {
 	state := execScheduler.GetState()

 	err := make(chan error)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	go func() { err <- execScheduler.Run(ctx, ctx, nil, builtinMetrics) }()
+	go func() { err <- execScheduler.Run(ctx, ctx, nil) }()
 	for !state.HasStarted() {
 		time.Sleep(10 * time.Microsecond)
 	}
@@ -1094,7 +1082,7 @@ func TestDNSResolver(t *testing.T) {
 					defer mr.Unset("myhost")

 					errCh := make(chan error, 1)
-					go func() { errCh <- execScheduler.Run(ctx, ctx, samples, builtinMetrics) }()
+					go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }()

 					select {
 					case err := <-errCh:
@@ -1172,7 +1160,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, runner.SetOptions(options))

-	execScheduler, err := NewExecutionScheduler(runner, logger)
+	execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 	require.NoError(t, err)

 	ctx, cancel := context.WithCancel(context.Background())
@@ -1182,7 +1170,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) {
 	sampleContainers := make(chan stats.SampleContainer)
 	go func() {
 		require.NoError(t, execScheduler.Init(ctx, sampleContainers))
-		assert.NoError(t, execScheduler.Run(ctx, ctx, sampleContainers, builtinMetrics))
+		assert.NoError(t, execScheduler.Run(ctx, ctx, sampleContainers))
 		close(done)
 	}()

@@ -1193,6 +1181,17 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) {
 	for {
 		select {
 		case sampleContainer := <-sampleContainers:
+			gotVus := false
+			for _, s := range sampleContainer.GetSamples() {
+				if s.Metric == builtinMetrics.VUs || s.Metric == builtinMetrics.VUsMax {
+					gotVus = true
+					break
+				}
+			}
+			if gotVus {
+				continue
+			}
+
 			now := time.Now()
 			elapsed := now.Sub(start)
 			if elapsed < from {
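Because the scheduler now emits VUs/VUsMax samples once per second, timing-sensitive tests must skip those samples, which is exactly what the gotVus filter above does. The same check, pulled out as a hypothetical helper (not code from this change):

package example

import (
	"go.k6.io/k6/metrics"
	"go.k6.io/k6/stats"
)

// isVUCountSample reports whether a sample belongs to the periodically
// emitted VUs/VUsMax built-in metrics; tests that assert on the timing of
// other samples can simply skip containers holding these.
func isVUCountSample(s stats.Sample, bm *metrics.BuiltinMetrics) bool {
	return s.Metric == bm.VUs || s.Metric == bm.VUsMax
}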
@@ -1293,7 +1292,9 @@ func TestSetPaused(t *testing.T) {
 		runner := &minirunner.MiniRunner{}
 		logger := logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))
-		sched, err := NewExecutionScheduler(runner, logger)
+		registry := metrics.NewRegistry()
+		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+		sched, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 		require.NoError(t, err)
 		sched.executors = []lib.Executor{pausableExecutor{err: nil}}

@@ -1308,7 +1309,9 @@ func TestSetPaused(t *testing.T) {
 		runner := &minirunner.MiniRunner{}
 		logger := logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))
-		sched, err := NewExecutionScheduler(runner, logger)
+		registry := metrics.NewRegistry()
+		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+		sched, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 		require.NoError(t, err)
 		sched.executors = []lib.Executor{pausableExecutor{err: nil}}
 		err = sched.SetPaused(false)
@@ -1321,7 +1324,9 @@ func TestSetPaused(t *testing.T) {
 		runner := &minirunner.MiniRunner{}
 		logger := logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))
-		sched, err := NewExecutionScheduler(runner, logger)
+		registry := metrics.NewRegistry()
+		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+		sched, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 		require.NoError(t, err)
 		sched.executors = []lib.Executor{pausableExecutor{err: nil}}
 		require.NoError(t, sched.SetPaused(true))
@@ -1336,7 +1341,9 @@ func TestSetPaused(t *testing.T) {
 		runner := &minirunner.MiniRunner{}
 		logger := logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))
-		sched, err := NewExecutionScheduler(runner, logger)
+		registry := metrics.NewRegistry()
+		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+		sched, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 		require.NoError(t, err)
 		expectedErr := errors.New("testing pausable executor error")
 		sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}}
@@ -1357,7 +1364,9 @@ func TestSetPaused(t *testing.T) {
 		logger := logrus.New()
 		logger.SetOutput(testutils.NewTestOutput(t))

-		sched, err := NewExecutionScheduler(runner, logger)
+		registry := metrics.NewRegistry()
+		builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+		sched, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 		require.NoError(t, err)
 		err = sched.SetPaused(true)
 		require.Error(t, err)
@@ -1418,7 +1427,7 @@ func TestNewExecutionSchedulerHasWork(t *testing.T) {
 	)
 	require.NoError(t, err)

-	execScheduler, err := NewExecutionScheduler(runner, logger)
+	execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
 	require.NoError(t, err)

 	assert.Len(t, execScheduler.executors, 2)
diff --git a/js/bundle.go b/js/bundle.go
index dba20a7d8e7..38266d15c09 100644
--- a/js/bundle.go
+++ b/js/bundle.go
@@ -39,8 +39,8 @@ import (
 	"go.k6.io/k6/js/eventloop"
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/consts"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 )

 // A Bundle is a self-contained bundle of scripts and resources.
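The js/* changes that follow are all the same mechanical import move: the metrics package now lives at the repository root instead of under lib/. Since the package identifier is still metrics, only the import path changes and no call sites are touched. Schematically:

package example

import (
	// previously: "go.k6.io/k6/lib/metrics"
	"go.k6.io/k6/metrics"
)

var _ = metrics.NewRegistry // the package name is unchanged, so call sites stay the same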
diff --git a/js/bundle_test.go b/js/bundle_test.go
index 6f8f8b9875b..ddad4833736 100644
--- a/js/bundle_test.go
+++ b/js/bundle_test.go
@@ -42,10 +42,10 @@ import (
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/consts"
 	"go.k6.io/k6/lib/fsext"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 )

 const isWindows = runtime.GOOS == "windows"
diff --git a/js/common/initenv.go b/js/common/initenv.go
index 793445ffa5a..0bf3b051914 100644
--- a/js/common/initenv.go
+++ b/js/common/initenv.go
@@ -26,7 +26,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/afero"

-	"go.k6.io/k6/lib/metrics"
+	"go.k6.io/k6/metrics"
 )

 // InitEnvironment contains properties that can be accessed by Go code executed
diff --git a/js/console_test.go b/js/console_test.go
index 51827517022..fc2bc220c1f 100644
--- a/js/console_test.go
+++ b/js/console_test.go
@@ -37,9 +37,9 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/init_and_modules_test.go b/js/init_and_modules_test.go
index 3c1d3ba5108..7e7c9fe7b0f 100644
--- a/js/init_and_modules_test.go
+++ b/js/init_and_modules_test.go
@@ -36,9 +36,9 @@ import (
 	"go.k6.io/k6/js"
 	"go.k6.io/k6/js/modules"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/initcontext_test.go b/js/initcontext_test.go
index adf1a4b5e4c..8a941f42171 100644
--- a/js/initcontext_test.go
+++ b/js/initcontext_test.go
@@ -40,10 +40,10 @@ import (
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/consts"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/netext"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/types"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/module_loading_test.go b/js/module_loading_test.go
index 0de867a5547..ae601dbe09e 100644
--- a/js/module_loading_test.go
+++ b/js/module_loading_test.go
@@ -31,9 +31,9 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/grpc/client_test.go b/js/modules/k6/grpc/client_test.go
index 2b226c6964c..d34c1fbb2c2 100644
--- a/js/modules/k6/grpc/client_test.go
+++ b/js/modules/k6/grpc/client_test.go
@@ -56,9 +56,9 @@ import (
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/fsext"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/html/html_test.go b/js/modules/k6/html/html_test.go
index 3851150cf8f..6fec3cbe685 100644
--- a/js/modules/k6/html/html_test.go
+++ b/js/modules/k6/html/html_test.go
@@ -30,7 +30,7 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/js/modulestest"
-	"go.k6.io/k6/lib/metrics"
+	"go.k6.io/k6/metrics"
 )

 const testHTML = `
diff --git a/js/modules/k6/http/http_test.go b/js/modules/k6/http/http_test.go
index 3dcb0102d33..18904a39b54 100644
--- a/js/modules/k6/http/http_test.go
+++ b/js/modules/k6/http/http_test.go
@@ -31,8 +31,8 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/netext/httpext"
+	"go.k6.io/k6/metrics"
 )

 //nolint: golint, revive
diff --git a/js/modules/k6/http/request_test.go b/js/modules/k6/http/request_test.go
index e9cc9c37e0b..d98f447f37a 100644
--- a/js/modules/k6/http/request_test.go
+++ b/js/modules/k6/http/request_test.go
@@ -50,9 +50,9 @@ import (
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/http/response_callback_test.go b/js/modules/k6/http/response_callback_test.go
index f0438513ec8..79c23615dcc 100644
--- a/js/modules/k6/http/response_callback_test.go
+++ b/js/modules/k6/http/response_callback_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/k6_test.go b/js/modules/k6/k6_test.go
index 4cc853c8668..00c39927506 100644
--- a/js/modules/k6/k6_test.go
+++ b/js/modules/k6/k6_test.go
@@ -33,7 +33,7 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/marshalling_test.go b/js/modules/k6/marshalling_test.go
index 6992e2d9261..809b392531f 100644
--- a/js/modules/k6/marshalling_test.go
+++ b/js/modules/k6/marshalling_test.go
@@ -31,11 +31,11 @@ import (
 	"go.k6.io/k6/js"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/metrics/metrics_test.go b/js/modules/k6/metrics/metrics_test.go
index 3dd7c0c2c63..cfd5547880c 100644
--- a/js/modules/k6/metrics/metrics_test.go
+++ b/js/modules/k6/metrics/metrics_test.go
@@ -34,8 +34,8 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/ws/ws.go b/js/modules/k6/ws/ws.go
index 944f79b6184..0bcc011136f 100644
--- a/js/modules/k6/ws/ws.go
+++ b/js/modules/k6/ws/ws.go
@@ -39,7 +39,7 @@ import (
 	"go.k6.io/k6/js/common"
 	"go.k6.io/k6/js/modules"
 	httpModule "go.k6.io/k6/js/modules/k6/http"
-	"go.k6.io/k6/lib/metrics"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/modules/k6/ws/ws_test.go b/js/modules/k6/ws/ws_test.go
index 7e3a259de3b..84c031461ae 100644
--- a/js/modules/k6/ws/ws_test.go
+++ b/js/modules/k6/ws/ws_test.go
@@ -42,8 +42,8 @@ import (
 	httpModule "go.k6.io/k6/js/modules/k6/http"
 	"go.k6.io/k6/js/modulestest"
 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/runner.go b/js/runner.go
index f15d071ecc4..f352d9cf6d6 100644
--- a/js/runner.go
+++ b/js/runner.go
@@ -51,10 +51,10 @@ import (
 	"go.k6.io/k6/js/eventloop"
 	"go.k6.io/k6/lib"
 	"go.k6.io/k6/lib/consts"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/netext"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/loader"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )
diff --git a/js/runner_test.go b/js/runner_test.go
index f828095bf3b..fa6c2ddd215 100644
--- a/js/runner_test.go
+++ b/js/runner_test.go
@@ -56,12 +56,12 @@ import (
 	"go.k6.io/k6/lib"
 	_ "go.k6.io/k6/lib/executor" // TODO: figure out something better
 	"go.k6.io/k6/lib/fsext"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils"
 	"go.k6.io/k6/lib/testutils/httpmultibin"
 	"go.k6.io/k6/lib/testutils/mockoutput"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/loader"
"go.k6.io/k6/metrics" "go.k6.io/k6/output" "go.k6.io/k6/stats" ) @@ -304,16 +304,18 @@ func TestSetupDataIsolation(t *testing.T) { options := runner.GetOptions() require.Empty(t, options.Validate()) - execScheduler, err := local.NewExecutionScheduler(runner, testutils.NewLogger(t)) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, testutils.NewLogger(t)) require.NoError(t, err) mockOutput := mockoutput.New() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) engine, err := core.NewEngine( - execScheduler, options, lib.RuntimeOptions{}, []output.Output{mockOutput}, testutils.NewLogger(t), builtinMetrics, + execScheduler, options, lib.RuntimeOptions{}, []output.Output{mockOutput}, testutils.NewLogger(t), registry, ) require.NoError(t, err) + require.NoError(t, engine.OutputManager.StartOutputs()) + defer engine.OutputManager.StopOutputs() ctx, cancel := context.WithCancel(context.Background()) run, wait, err := engine.Init(ctx, ctx) @@ -2327,7 +2329,9 @@ func TestExecutionInfo(t *testing.T) { initVU, err := r.NewVU(1, 10, samples) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(r, testutils.NewLogger(t)) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + execScheduler, err := local.NewExecutionScheduler(r, builtinMetrics, testutils.NewLogger(t)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/js/share_test.go b/js/share_test.go index 20cbcc6c909..fadfe122436 100644 --- a/js/share_test.go +++ b/js/share_test.go @@ -30,8 +30,8 @@ import ( "github.com/stretchr/testify/require" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/lib/execution.go b/lib/execution.go index 14c090414ed..69c8b6c4dc3 100644 --- a/lib/execution.go +++ b/lib/execution.go @@ -30,7 +30,7 @@ import ( "github.com/sirupsen/logrus" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) @@ -62,10 +62,7 @@ type ExecutionScheduler interface { // Run the ExecutionScheduler, funneling the generated metric samples // through the supplied out channel. - Run( - globalCtx, runCtx context.Context, samplesOut chan<- stats.SampleContainer, - builtinMetrics *metrics.BuiltinMetrics, - ) error + Run(globalCtx, runCtx context.Context, samplesOut chan<- stats.SampleContainer) error // Pause a test, or start/resume it. To check if a test is paused, use // GetState().IsPaused(). @@ -156,6 +153,8 @@ type ExecutionState struct { ExecutionTuple *ExecutionTuple // TODO Rename, possibly move + BuiltinMetrics *metrics.BuiltinMetrics + // vus is the shared channel buffer that contains all of the VUs that have // been initialized and aren't currently being used by a executor. // @@ -277,7 +276,10 @@ type ExecutionState struct { // NewExecutionState initializes all of the pointers in the ExecutionState // with zeros. It also makes sure that the initial state is unpaused, by // setting resumeNotify to an already closed channel. 
-func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPossibleVUs uint64) *ExecutionState {
+func NewExecutionState(
+	options Options, et *ExecutionTuple, builtinMetrics *metrics.BuiltinMetrics,
+	maxPlannedVUs, maxPossibleVUs uint64,
+) *ExecutionState {
 	resumeNotify := make(chan struct{})
 	close(resumeNotify) // By default the ExecutionState starts unpaused
@@ -285,8 +287,11 @@ func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPo
 	segIdx := NewSegmentedIndex(et)

 	return &ExecutionState{
-		Options: options,
-		vus:     make(chan InitializedVU, maxPossibleVUs),
+		Options:        options,
+		ExecutionTuple: et,
+		BuiltinMetrics: builtinMetrics,
+
+		vus: make(chan InitializedVU, maxPossibleVUs),

 		executionStatus: new(uint32),
 		vuIDSegIndexMx:  new(sync.Mutex),
@@ -302,7 +307,6 @@ func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPo
 		pauseStateLock:      sync.RWMutex{},
 		totalPausedDuration: 0, // Accessed only behind the pauseStateLock
 		resumeNotify:        resumeNotify,
-		ExecutionTuple:      et,
 	}
 }
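Storing BuiltinMetrics on the ExecutionState is what lets the executor Run methods below drop their extra parameter: any built-in metric is reachable through the state the executor already holds. A hedged sketch of the access pattern (the helper and its wiring are illustrative, not code from this change):

package example

import (
	"context"
	"time"

	"go.k6.io/k6/lib"
	"go.k6.io/k6/stats"
)

// emitDroppedIteration pushes one dropped_iterations sample, resolving the
// metric through the execution state instead of a Run() parameter.
func emitDroppedIteration(ctx context.Context, es *lib.ExecutionState,
	out chan<- stats.SampleContainer, tags *stats.SampleTags,
) {
	stats.PushIfNotDone(ctx, out, stats.Sample{
		Time:   time.Now(),
		Metric: es.BuiltinMetrics.DroppedIterations,
		Value:  1,
		Tags:   tags,
	})
}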
diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go
index ce5c13f8cf8..d47561d5bd9 100644
--- a/lib/executor/constant_arrival_rate.go
+++ b/lib/executor/constant_arrival_rate.go
@@ -33,7 +33,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -212,9 +211,7 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error {
 // This will allow us to implement https://github.com/k6io/k6/issues/1386
 // and things like all of the TODOs below in one place only.
 //nolint:funlen,cyclop
-func (car ConstantArrivalRate) Run(
-	parentCtx context.Context, out chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics,
-) (err error) {
+func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
 	gracefulStop := car.config.GetGracefulStop()
 	duration := car.config.Duration.TimeDuration()
 	preAllocatedVUs := car.config.GetPreAllocatedVUs(car.executionState.ExecutionTuple)
@@ -332,7 +329,7 @@ func (car ConstantArrivalRate) Run(
 		int64(car.config.TimeUnit.TimeDuration()),
 	)).TimeDuration()

-	droppedIterationMetric := builtinMetrics.DroppedIterations
+	droppedIterationMetric := car.executionState.BuiltinMetrics.DroppedIterations
 	shownWarning := false
 	metricTags := car.getMetricTags(nil)
 	for li, gi := 0, start; ; li, gi = li+1, gi+offsets[li%len(offsets)] {
diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go
index 8054d193597..563789aaf4c 100644
--- a/lib/executor/constant_arrival_rate_test.go
+++ b/lib/executor/constant_arrival_rate_test.go
@@ -34,9 +34,9 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils/minirunner"
 	"go.k6.io/k6/lib/types"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )

@@ -71,7 +71,10 @@ func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t, getTestConstantArrivalRateConfig(), es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -81,10 +84,7 @@ func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	entries := logHook.Drain()
 	require.NotEmpty(t, entries)
@@ -101,7 +101,9 @@ func TestConstantArrivalRateRunCorrectRate(t *testing.T) {
 	var count int64
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t, getTestConstantArrivalRateConfig(), es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -124,9 +126,7 @@ func TestConstantArrivalRateRunCorrectRate(t *testing.T) {
 		}
 	}()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	wg.Wait()
 	require.NoError(t, err)
 	require.Empty(t, logHook.Drain())
@@ -199,7 +199,7 @@ func TestConstantArrivalRateRunCorrectTiming(t *testing.T) {
 			es := lib.NewExecutionState(lib.Options{
 				ExecutionSegment:         test.segment,
 				ExecutionSegmentSequence: test.sequence,
-			}, et, 10, 50)
+			}, et, builtinMetrics, 10, 50)
 			var count int64
 			seconds := 2
 			config := getTestConstantArrivalRateConfig()
@@ -249,7 +249,7 @@ func TestConstantArrivalRateRunCorrectTiming(t *testing.T) {
 			}()
 			startTime = time.Now()
 			engineOut := make(chan stats.SampleContainer, 1000)
-			err = executor.Run(ctx, engineOut, builtinMetrics)
+			err = executor.Run(ctx, engineOut)
 			wg.Wait()
 			require.NoError(t, err)
 			require.Empty(t, logHook.Drain())
@@ -275,7 +275,7 @@ func TestArrivalRateCancel(t *testing.T) {
 			weAreDoneCh := make(chan struct{})
 			et, err := lib.NewExecutionTuple(nil, nil)
 			require.NoError(t, err)
-			es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+			es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 			ctx, cancel, executor, logHook := setupExecutor(
 				t, config, es, simpleRunner(func(ctx context.Context, _ *lib.State) error {
 					select {
@@ -292,7 +292,7 @@ func TestArrivalRateCancel(t *testing.T) {
 					defer wg.Done()

 					engineOut := make(chan stats.SampleContainer, 1000)
-					errCh <- executor.Run(ctx, engineOut, builtinMetrics)
+					errCh <- executor.Run(ctx, engineOut)
 					close(weAreDoneCh)
 				}()

@@ -329,7 +329,9 @@ func TestConstantArrivalRateDroppedIterations(t *testing.T) {
 		MaxVUs:          null.IntFrom(5),
 	}

-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -340,9 +342,7 @@ func TestConstantArrivalRateDroppedIterations(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	logs := logHook.Drain()
 	require.Len(t, logs, 1)
@@ -384,7 +384,7 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) {
 	require.NoError(t, err)
 	et, err := lib.NewExecutionTuple(seg, &ess)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 5, 5)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5)

 	runner := &minirunner.MiniRunner{}
 	ctx, cancel, executor, _ := setupExecutor(t, config, es, runner)
@@ -400,7 +400,7 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) {
 	}

 	engineOut := make(chan stats.SampleContainer, 100)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	assert.Equal(t, tc.expIters, gotIters)
 })
diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go
index 16f2f79d400..83e35c47c42 100644
--- a/lib/executor/constant_vus.go
+++ b/lib/executor/constant_vus.go
@@ -30,7 +30,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -143,9 +142,7 @@ var _ lib.Executor = &ConstantVUs{}

 // Run constantly loops through as many iterations as possible on a fixed number
 // of VUs for the specified duration.
-func (clv ConstantVUs) Run(
-	parentCtx context.Context, out chan<- stats.SampleContainer, _ *metrics.BuiltinMetrics,
-) (err error) {
+func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
 	numVUs := clv.config.GetVUs(clv.executionState.ExecutionTuple)
 	duration := clv.config.Duration.TimeDuration()
 	gracefulStop := clv.config.GetGracefulStop()
diff --git a/lib/executor/constant_vus_test.go b/lib/executor/constant_vus_test.go
index b4d33a5b0a0..4eb8306a171 100644
--- a/lib/executor/constant_vus_test.go
+++ b/lib/executor/constant_vus_test.go
@@ -47,7 +47,7 @@ func TestConstantVUsRun(t *testing.T) {
 	var result sync.Map
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, getTestConstantVUsConfig(), es,
 		simpleRunner(func(ctx context.Context, state *lib.State) error {
@@ -63,7 +63,7 @@ func TestConstantVUsRun(t *testing.T) {
 		}),
 	)
 	defer cancel()
-	err = executor.Run(ctx, nil, nil)
+	err = executor.Run(ctx, nil)
 	require.NoError(t, err)

 	var totalIters uint64
diff --git a/lib/executor/execution_test.go b/lib/executor/execution_test.go
index 91c9c3029b4..44724531211 100644
--- a/lib/executor/execution_test.go
+++ b/lib/executor/execution_test.go
@@ -61,7 +61,7 @@ func TestExecutionStateVUIDs(t *testing.T) {
 			require.NoError(t, err)
 			start, offsets, _ := et.GetStripedOffsets()

-			es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
+			es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0)

 			idl, idg := es.GetUniqueVUIdentifiers()
 			assert.Equal(t, uint64(1), idl)
@@ -102,7 +102,7 @@ func TestExecutionStateGettingVUsWhenNonAreAvailable(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0)
 	logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}}
 	testLog := logrus.New()
 	testLog.AddHook(logHook)
@@ -128,7 +128,7 @@ func TestExecutionStateGettingVUs(t *testing.T) {
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)

-	es := lib.NewExecutionState(lib.Options{}, et, 10, 20)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 20)
 	es.SetInitVUFunc(func(_ context.Context, _ *logrus.Entry) (lib.InitializedVU, error) {
 		return &minirunner.VU{}, nil
 	})
@@ -193,7 +193,7 @@ func TestMarkStartedPanicsOnSecondRun(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0)
 	require.False(t, es.HasStarted())
 	es.MarkStarted()
 	require.True(t, es.HasStarted())
@@ -204,7 +204,7 @@ func TestMarkEnded(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0)
 	require.False(t, es.HasEnded())
 	es.MarkEnded()
 	require.True(t, es.HasEnded())
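Note the convention these executor tests settle on: tests that never emit built-in metrics pass nil for the new NewExecutionState parameter, while tests that exercise dropped_iterations construct a real registry first. Schematically (illustrative helpers, not test code from this diff):

package example

import (
	"go.k6.io/k6/lib"
	"go.k6.io/k6/metrics"
)

// stateWithoutMetrics is enough for tests that never emit built-in metrics.
func stateWithoutMetrics(et *lib.ExecutionTuple) *lib.ExecutionState {
	return lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
}

// stateWithMetrics is needed as soon as an executor may emit, for example,
// dropped_iterations through es.BuiltinMetrics.
func stateWithMetrics(et *lib.ExecutionTuple) *lib.ExecutionState {
	registry := metrics.NewRegistry()
	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
	return lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
}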
diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go
index 5494cc9cb1b..63cd31b07f6 100644
--- a/lib/executor/externally_controlled.go
+++ b/lib/executor/externally_controlled.go
@@ -33,7 +33,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -500,9 +499,7 @@ func (rs *externallyControlledRunState) handleConfigChange(oldCfg, newCfg Extern
 // dynamically controlled number of VUs either for the specified duration, or
 // until the test is manually stopped.
 // nolint:funlen,gocognit,cyclop
-func (mex *ExternallyControlled) Run(
-	parentCtx context.Context, out chan<- stats.SampleContainer, _ *metrics.BuiltinMetrics,
-) (err error) {
+func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
 	mex.configLock.RLock()
 	// Safely get the current config - it's important that the close of the
 	// hasStarted channel is inside of the lock, so that there are no data races
diff --git a/lib/executor/externally_controlled_test.go b/lib/executor/externally_controlled_test.go
index 8b777b3960a..19cbd78bed1 100644
--- a/lib/executor/externally_controlled_test.go
+++ b/lib/executor/externally_controlled_test.go
@@ -50,7 +50,7 @@ func TestExternallyControlledRun(t *testing.T) {
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)

-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)

 	doneIters := new(uint64)
 	ctx, cancel, executor, _ := setupExecutor(
@@ -72,7 +72,7 @@ func TestExternallyControlledRun(t *testing.T) {
 		go func() {
 			defer wg.Done()
 			es.MarkStarted()
-			errCh <- executor.Run(ctx, nil, nil)
+			errCh <- executor.Run(ctx, nil)
 			es.MarkEnded()
 			close(doneCh)
 		}()
diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go
index aed0fc2b051..f6be16913b0 100644
--- a/lib/executor/per_vu_iterations.go
+++ b/lib/executor/per_vu_iterations.go
@@ -31,7 +31,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -152,9 +151,7 @@ var _ lib.Executor = &PerVUIterations{}

 // Run executes a specific number of iterations with each configured VU.
 // nolint:funlen
-func (pvi PerVUIterations) Run(
-	parentCtx context.Context, out chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics,
-) (err error) {
+func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
 	numVUs := pvi.config.GetVUs(pvi.executionState.ExecutionTuple)
 	iterations := pvi.config.GetIterations()
 	duration := pvi.config.MaxDuration.TimeDuration()
@@ -214,7 +211,7 @@ func (pvi PerVUIterations) Run(
 		activeVUs.Done()
 	}

-	droppedIterationMetric := builtinMetrics.DroppedIterations
+	droppedIterationMetric := pvi.executionState.BuiltinMetrics.DroppedIterations
 	handleVU := func(initVU lib.InitializedVU) {
 		defer handleVUsWG.Done()
 		ctx, cancel := context.WithCancel(maxDurationCtx)
diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go
index b42637ed231..f00d2cae38e 100644
--- a/lib/executor/per_vu_iterations_test.go
+++ b/lib/executor/per_vu_iterations_test.go
@@ -32,8 +32,8 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )

@@ -52,7 +52,9 @@ func TestPerVUIterationsRun(t *testing.T) {
 	var result sync.Map
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, getTestPerVUIterationsConfig(), es,
 		simpleRunner(func(ctx context.Context, state *lib.State) error {
@@ -63,9 +65,7 @@ func TestPerVUIterationsRun(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)

 	var totalIters uint64
@@ -88,7 +88,9 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) {
 	)
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, getTestPerVUIterationsConfig(), es,
 		simpleRunner(func(ctx context.Context, state *lib.State) error {
@@ -102,9 +104,7 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)

 	val, ok := result.Load(slowVUID)
@@ -138,7 +138,9 @@ func TestPerVuIterationsEmitDroppedIterations(t *testing.T) {
 		MaxDuration: types.NullDurationFrom(1 * time.Second),
 	}

-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -149,9 +151,7 @@ func TestPerVuIterationsEmitDroppedIterations(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	assert.Empty(t, logHook.Drain())
 	assert.Equal(t, int64(5), count)
diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go
index 096f1cf4fac..c99486ccef1 100644
--- a/lib/executor/ramping_arrival_rate.go
+++ b/lib/executor/ramping_arrival_rate.go
@@ -32,7 +32,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -320,9 +319,7 @@ func noNegativeSqrt(f float64) float64 {
 // This will allow us to implement https://github.com/k6io/k6/issues/1386
 // and things like all of the TODOs below in one place only.
 //nolint:funlen,cyclop
-func (varr RampingArrivalRate) Run(
-	parentCtx context.Context, out chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics,
-) (err error) {
+func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
 	segment := varr.executionState.ExecutionTuple.Segment
 	gracefulStop := varr.config.GetGracefulStop()
 	duration := sumStagesDuration(varr.config.Stages)
@@ -456,7 +453,7 @@ func (varr RampingArrivalRate) Run(
 	shownWarning := false
 	metricTags := varr.getMetricTags(nil)
 	go varr.config.cal(varr.et, ch)
-	droppedIterationMetric := builtinMetrics.DroppedIterations
+	droppedIterationMetric := varr.executionState.BuiltinMetrics.DroppedIterations
 	for nextTime := range ch {
 		select {
 		case <-regDurationDone:
diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go
index 36ce5094a18..980303f0272 100644
--- a/lib/executor/ramping_arrival_rate_test.go
+++ b/lib/executor/ramping_arrival_rate_test.go
@@ -35,9 +35,9 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/testutils/minirunner"
 	"go.k6.io/k6/lib/types"
+	"go.k6.io/k6/metrics"
 	"go.k6.io/k6/stats"
 )

@@ -69,7 +69,9 @@ func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t, getTestRampingArrivalRateConfig(), es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -79,9 +81,7 @@ func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) {
 	)
 	defer cancel()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	entries := logHook.Drain()
 	require.NotEmpty(t, entries)
@@ -98,7 +98,9 @@ func TestRampingArrivalRateRunCorrectRate(t *testing.T) {
 	var count int64
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)
 	ctx, cancel, executor, logHook := setupExecutor(
 		t,
 		getTestRampingArrivalRateConfig(), es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -127,9 +129,7 @@ func TestRampingArrivalRateRunCorrectRate(t *testing.T) {
 		assert.InDelta(t, 50, currentCount, 3)
 	}()
 	engineOut := make(chan stats.SampleContainer, 1000)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	wg.Wait()
 	require.NoError(t, err)
 	require.Empty(t, logHook.Drain())
@@ -139,7 +139,9 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 1, 3)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3)
 	var count int64
 	ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations
 	ch2 := make(chan struct{}) // closed when a second iteration was started on an old VU in order to test it won't start a second unplanned VU in parallel or at all
@@ -192,9 +194,8 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) {
 		idl, idg := es.GetUniqueVUIdentifiers()
 		return runner.NewVU(idl, idg, engineOut)
 	})
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+
+	err = executor.Run(ctx, engineOut)
 	assert.NoError(t, err)
 	assert.Empty(t, logHook.Drain())

@@ -206,7 +207,9 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 1, 3)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3)
 	var count int64
 	ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations
 	runner := simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -245,9 +248,8 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) {
 		idl, idg := es.GetUniqueVUIdentifiers()
 		return runner.NewVU(idl, idg, engineOut)
 	})
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+
+	err = executor.Run(ctx, engineOut)
 	assert.NoError(t, err)
 	assert.Empty(t, logHook.Drain())
 	assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount())
@@ -258,7 +260,9 @@ func TestRampingArrivalRateRunGracefulStop(t *testing.T) {
 	t.Parallel()
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 10)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 10)

 	runner := simpleRunner(func(ctx context.Context, _ *lib.State) error {
 		time.Sleep(5 * time.Second)
@@ -286,9 +290,7 @@ func TestRampingArrivalRateRunGracefulStop(t *testing.T) {
 	engineOut := make(chan stats.SampleContainer, 1000)
 	defer close(engineOut)

-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	assert.NoError(t, err)
 	assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount())
 	assert.Equal(t, int64(10), es.GetInitializedVUsCount())
@@ -315,7 +317,12 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) {
 				}
 			}()

-			es := lib.NewExecutionState(lib.Options{}, mustNewExecutionTuple(nil, nil), uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64))
+			registry := metrics.NewRegistry()
+			builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+			es := lib.NewExecutionState(
+				lib.Options{}, mustNewExecutionTuple(nil, nil), builtinMetrics,
+				uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64),
+			)

 			var count int64
 			runner := simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -348,9 +355,7 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) {
 			b.ResetTimer()
 			start := time.Now()

-			registry := metrics.NewRegistry()
-			builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-			err := executor.Run(ctx, engineOut, builtinMetrics)
+			err := executor.Run(ctx, engineOut)
 			took := time.Since(start)
 			assert.NoError(b, err)
@@ -742,7 +747,9 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) {
 	require.NoError(t, err)
 	et, err := lib.NewExecutionTuple(seg, &ess)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 5, 5)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5)

 	runner := &minirunner.MiniRunner{}
 	ctx, cancel, executor, _ := setupExecutor(t, config, es, runner)
@@ -758,9 +765,7 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) {
 	}

 	engineOut := make(chan stats.SampleContainer, 100)
-	registry := metrics.NewRegistry()
-	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
-	err = executor.Run(ctx, engineOut, builtinMetrics)
+	err = executor.Run(ctx, engineOut)
 	require.NoError(t, err)
 	assert.Equal(t, tc.expIters, gotIters)
 })
@@ -783,7 +788,10 @@ func TestRampingArrivalRateCornerCase(t *testing.T) {
 	et, err := lib.NewExecutionTuple(newExecutionSegmentFromString("1/5:2/5"), newExecutionSegmentSequenceFromString("0,1/5,2/5,1"))
 	require.NoError(t, err)

-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	registry := metrics.NewRegistry()
+	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
+
+	es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50)

 	executor, err := config.NewExecutor(es, nil)
 	require.NoError(t, err)
diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go
index 18a07b60306..0bc69c0a87d 100644
--- a/lib/executor/ramping_vus.go
+++ b/lib/executor/ramping_vus.go
@@ -31,7 +31,6 @@ import (
 	"gopkg.in/guregu/null.v3"

 	"go.k6.io/k6/lib"
-	"go.k6.io/k6/lib/metrics"
 	"go.k6.io/k6/lib/types"
 	"go.k6.io/k6/stats"
 	"go.k6.io/k6/ui/pb"
@@ -507,7 +506,7 @@ func (vlv *RampingVUs) Init(_ context.Context) error {

 // Run constantly loops through as many iterations as possible on a variable
 // number of VUs for the specified stages.
-func (vlv *RampingVUs) Run(ctx context.Context, _ chan<- stats.SampleContainer, _ *metrics.BuiltinMetrics) error {
+func (vlv *RampingVUs) Run(ctx context.Context, _ chan<- stats.SampleContainer) error {
 	regularDuration, isFinal := lib.GetEndOffset(vlv.rawSteps)
 	if !isFinal {
 		return fmt.Errorf("%s expected raw end offset at %s to be final", vlv.config.GetName(), regularDuration)
diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go
index da8f861ff4f..d3ea9c8e3ee 100644
--- a/lib/executor/ramping_vus_test.go
+++ b/lib/executor/ramping_vus_test.go
@@ -84,7 +84,7 @@ func TestRampingVUsRun(t *testing.T) {
 	var iterCount int64
 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -105,7 +105,7 @@ func TestRampingVUsRun(t *testing.T) {
 	}

 	errCh := make(chan error)
-	go func() { errCh <- executor.Run(ctx, nil, nil) }()
+	go func() { errCh <- executor.Run(ctx, nil) }()

 	result := make([]int64, len(sampleTimes))
 	for i, d := range sampleTimes {
@@ -141,7 +141,7 @@ func TestRampingVUsGracefulStopWaits(t *testing.T) {

 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -157,7 +157,7 @@ func TestRampingVUsGracefulStopWaits(t *testing.T) {
 	)
 	defer cancel()
 	errCh := make(chan error)
-	go func() { errCh <- executor.Run(ctx, nil, nil) }()
+	go func() { errCh <- executor.Run(ctx, nil) }()

 	<-started
 	// 500 milliseconds more then the duration and 500 less then the gracefulStop
@@ -190,7 +190,7 @@ func TestRampingVUsGracefulStopStops(t *testing.T) {

 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, _ *lib.State) error {
@@ -206,7 +206,7 @@ func TestRampingVUsGracefulStopStops(t *testing.T) {
 	)
 	defer cancel()
 	errCh := make(chan error)
-	go func() { errCh <- executor.Run(ctx, nil, nil) }()
+	go func() { errCh <- executor.Run(ctx, nil) }()

 	<-started
 	// 500 milliseconds more then the gracefulStop + duration
@@ -244,7 +244,7 @@ func TestRampingVUsGracefulRampDown(t *testing.T) {

 	et, err := lib.NewExecutionTuple(nil, nil)
 	require.NoError(t, err)
-	es := lib.NewExecutionState(lib.Options{}, et, 10, 50)
+	es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, config, es,
 		simpleRunner(func(ctx context.Context, state *lib.State) error {
@@ -264,7 +264,7 @@ func TestRampingVUsGracefulRampDown(t *testing.T) {
 	)
 	defer cancel()
 	errCh := make(chan error)
-	go func() { errCh <- executor.Run(ctx, nil, nil) }()
+	go func() { errCh <- executor.Run(ctx, nil) }()

 	<-started
 	// 500 milliseconds more then the gracefulRampDown + duration
@@ -349,11 +349,11 @@ func TestRampingVUsHandleRemainingVUs(t *testing.T) {
 	require.NoError(t, err)
 	ctx, cancel, executor, _ := setupExecutor(
 		t, cfg,
-		lib.NewExecutionState(lib.Options{}, et, maxVus, maxVus),
+		lib.NewExecutionState(lib.Options{}, et, nil, maxVus, maxVus),
simpleRunner(iteration), ) defer cancel() - require.NoError(t, executor.Run(ctx, nil, nil)) + require.NoError(t, executor.Run(ctx, nil)) assert.Equal(t, wantVuInterrupted, atomic.LoadUint32(&gotVuInterrupted)) assert.Equal(t, wantVuFinished, atomic.LoadUint32(&gotVuFinished)) @@ -382,7 +382,7 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) ctx, cancel, executor, _ := setupExecutor( t, config, es, simpleRunner(func(ctx context.Context, _ *lib.State) error { @@ -401,7 +401,7 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { int((config.Stages[len(config.Stages)-1].Duration.TimeDuration() + config.GracefulRampDown.TimeDuration()) / rampDownSampleTime) errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil, nil) }() + go func() { errCh <- executor.Run(ctx, nil) }() result := make([]int64, len(sampleTimes)+rampDownSamples) for i, d := range sampleTimes { diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index e31936f01b6..cb6563c4e2e 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -31,7 +31,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/types" "go.k6.io/k6/stats" "go.k6.io/k6/ui/pb" @@ -184,9 +183,7 @@ func (si *SharedIterations) Init(ctx context.Context) error { // Run executes a specific total number of iterations, which are all shared by // the configured VUs. // nolint:funlen -func (si SharedIterations) Run( - parentCtx context.Context, out chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics, -) (err error) { +func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { numVUs := si.config.GetVUs(si.executionState.ExecutionTuple) iterations := si.et.ScaleInt64(si.config.Iterations.Int64) duration := si.config.MaxDuration.TimeDuration() @@ -227,8 +224,9 @@ func (si SharedIterations) Run( activeVUs.Wait() if attemptedIters < totalIters { stats.PushIfNotDone(parentCtx, out, stats.Sample{ - Value: float64(totalIters - attemptedIters), Metric: builtinMetrics.DroppedIterations, - Tags: si.getMetricTags(nil), Time: time.Now(), + Value: float64(totalIters - attemptedIters), + Metric: si.executionState.BuiltinMetrics.DroppedIterations, + Tags: si.getMetricTags(nil), Time: time.Now(), }) } }() diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 197d2d78b65..7220d8d1d92 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -34,9 +34,9 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) @@ -54,7 +54,9 @@ func TestSharedIterationsRun(t *testing.T) { var doneIters uint64 et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) ctx, cancel, executor, _ := setupExecutor( t, getTestSharedIterationsConfig(), es, simpleRunner(func(ctx context.Context, _ *lib.State) error { @@ -63,9 +65,7 @@ func TestSharedIterationsRun(t *testing.T) { }), ) defer 
cancel() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - err = executor.Run(ctx, nil, builtinMetrics) + err = executor.Run(ctx, nil) require.NoError(t, err) assert.Equal(t, uint64(100), doneIters) } @@ -80,7 +80,9 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { ) et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) ctx, cancel, executor, _ := setupExecutor( t, getTestSharedIterationsConfig(), es, simpleRunner(func(ctx context.Context, state *lib.State) error { @@ -99,9 +101,7 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { }), ) defer cancel() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - err = executor.Run(ctx, nil, builtinMetrics) + err = executor.Run(ctx, nil) require.NoError(t, err) var totalIters uint64 @@ -130,7 +130,9 @@ func TestSharedIterationsEmitDroppedIterations(t *testing.T) { MaxDuration: types.NullDurationFrom(1 * time.Second), } - es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) ctx, cancel, executor, logHook := setupExecutor( t, config, es, simpleRunner(func(ctx context.Context, _ *lib.State) error { @@ -141,9 +143,7 @@ func TestSharedIterationsEmitDroppedIterations(t *testing.T) { ) defer cancel() engineOut := make(chan stats.SampleContainer, 1000) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - err = executor.Run(ctx, engineOut, builtinMetrics) + err = executor.Run(ctx, engineOut) require.NoError(t, err) assert.Empty(t, logHook.Drain()) assert.Equal(t, int64(5), count) @@ -178,7 +178,9 @@ func TestSharedIterationsGlobalIters(t *testing.T) { require.NoError(t, err) et, err := lib.NewExecutionTuple(seg, &ess) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, 5, 5) + registry := metrics.NewRegistry() + builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) runner := &minirunner.MiniRunner{} ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) @@ -194,9 +196,7 @@ func TestSharedIterationsGlobalIters(t *testing.T) { } engineOut := make(chan stats.SampleContainer, 100) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - err = executor.Run(ctx, engineOut, builtinMetrics) + err = executor.Run(ctx, engineOut) require.NoError(t, err) sort.Slice(gotIters, func(i, j int) bool { return gotIters[i] < gotIters[j] }) assert.Equal(t, tc.expIters, gotIters) diff --git a/lib/executors.go b/lib/executors.go index 1929c72e71f..e8ef95f49aa 100644 --- a/lib/executors.go +++ b/lib/executors.go @@ -31,7 +31,6 @@ import ( "github.com/sirupsen/logrus" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/stats" "go.k6.io/k6/ui/pb" ) @@ -129,7 +128,7 @@ type Executor interface { GetLogger() *logrus.Entry Init(ctx context.Context) error - Run(ctx context.Context, engineOut chan<- stats.SampleContainer, builtinMetrics *metrics.BuiltinMetrics) error + Run(ctx context.Context, engineOut chan<- stats.SampleContainer) error } // PausableExecutor should be implemented by the 
executors that can be paused diff --git a/lib/netext/dialer.go b/lib/netext/dialer.go index 56bf64a9b33..2f94178623d 100644 --- a/lib/netext/dialer.go +++ b/lib/netext/dialer.go @@ -29,8 +29,8 @@ import ( "time" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/types" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/lib/netext/httpext/request_test.go b/lib/netext/httpext/request_test.go index 3fd2c25b254..1b93383db41 100644 --- a/lib/netext/httpext/request_test.go +++ b/lib/netext/httpext/request_test.go @@ -42,7 +42,7 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/lib/netext/httpext/tracer.go b/lib/netext/httpext/tracer.go index 19accc0ad17..3af2b29015c 100644 --- a/lib/netext/httpext/tracer.go +++ b/lib/netext/httpext/tracer.go @@ -27,7 +27,7 @@ import ( "sync/atomic" "time" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" "gopkg.in/guregu/null.v3" ) diff --git a/lib/netext/httpext/tracer_test.go b/lib/netext/httpext/tracer_test.go index 01506a98774..049379d9c3d 100644 --- a/lib/netext/httpext/tracer_test.go +++ b/lib/netext/httpext/tracer_test.go @@ -41,9 +41,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/netext" "go.k6.io/k6/lib/types" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/lib/state.go b/lib/state.go index c314f055fd3..efa1a27c3f7 100644 --- a/lib/state.go +++ b/lib/state.go @@ -32,7 +32,7 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/time/rate" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/lib/metrics/metrics.go b/metrics/builtin.go similarity index 99% rename from lib/metrics/metrics.go rename to metrics/builtin.go index d49f0e36629..fde4a9b728b 100644 --- a/lib/metrics/metrics.go +++ b/metrics/builtin.go @@ -25,7 +25,7 @@ import ( ) const ( - VUsName = "vus" //nolint:golint + VUsName = "vus" //nolint:revive VUsMaxName = "vus_max" IterationsName = "iterations" IterationDurationName = "iteration_duration" diff --git a/metrics/engine/engine.go b/metrics/engine/engine.go new file mode 100644 index 00000000000..e5ce9944b04 --- /dev/null +++ b/metrics/engine/engine.go @@ -0,0 +1,176 @@ +// Package engine contains the internal metrics engine responsible for +// aggregating metrics during the test and evaluating thresholds against them. +package engine + +import ( + "fmt" + "strings" + "sync" + + "github.com/sirupsen/logrus" + "go.k6.io/k6/lib" + "go.k6.io/k6/metrics" + "go.k6.io/k6/output" + "go.k6.io/k6/stats" + "gopkg.in/guregu/null.v3" +) + +// MetricsEngine is the internal metrics engine that k6 uses to keep track of +// aggregated metric sample values. They are used to generate the end-of-test +// summary and to evaluate the test thresholds. 
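// A wiring sketch (editor's illustration, not part of the patch; opts,
// rtOpts, et, and logger are placeholders for values the caller already has):
//
//	registry := metrics.NewRegistry()
//	builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
//	es := lib.NewExecutionState(opts, et, builtinMetrics, 10, 50)
//	me, err := engine.NewMetricsEngine(registry, es, opts, rtOpts, logger)
//	ingester := me.GetIngester() // pseudo-output that feeds samples back into the engine
//	tainted, aborted := me.EvaluateThresholds()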
+type MetricsEngine struct { + registry *metrics.Registry + executionState *lib.ExecutionState + options lib.Options + runtimeOptions lib.RuntimeOptions + logger logrus.FieldLogger + + // These can be either top-level metrics or sub-metrics + metricsWithThresholds []*stats.Metric + + // TODO: completely refactor: + // - make these private, + // - do not use an unnecessary map for the observed metrics + // - have one lock per metric instead of a global one, when + // the metrics are decoupled from their types + MetricsLock sync.Mutex + ObservedMetrics map[string]*stats.Metric +} + +// NewMetricsEngine creates a new MetricsEngine with the given parameters. +func NewMetricsEngine( + registry *metrics.Registry, executionState *lib.ExecutionState, + opts lib.Options, rtOpts lib.RuntimeOptions, logger logrus.FieldLogger, +) (*MetricsEngine, error) { + me := &MetricsEngine{ + registry: registry, + executionState: executionState, + options: opts, + runtimeOptions: rtOpts, + logger: logger.WithField("component", "metrics-engine"), + + ObservedMetrics: make(map[string]*stats.Metric), + } + + if !(me.runtimeOptions.NoSummary.Bool && me.runtimeOptions.NoThresholds.Bool) { + err := me.initSubMetricsAndThresholds() + if err != nil { + return nil, err + } + } + + return me, nil +} + +// GetIngester returns a pseudo-Output that uses the metric samples it receives +// to update the engine's inner state. +func (me *MetricsEngine) GetIngester() output.Output { + return &outputIngester{ + logger: me.logger.WithField("component", "metrics-engine-ingester"), + metricsEngine: me, + } +} + +func (me *MetricsEngine) getThresholdMetricOrSubmetric(name string) (*stats.Metric, error) { + // TODO: replace with strings.Cut after Go 1.18 + nameParts := strings.SplitN(name, "{", 2) + + metric := me.registry.Get(nameParts[0]) + if metric == nil { + return nil, fmt.Errorf("metric '%s' does not exist in the script", nameParts[0]) + } + if len(nameParts) == 1 { // no sub-metric + return metric, nil + } + + submetricDefinition := nameParts[1] + if submetricDefinition[len(submetricDefinition)-1] != '}' { + return nil, fmt.Errorf("missing ending bracket, sub-metric format needs to be 'metric{key:value}'") + } + sm, err := metric.AddSubmetric(submetricDefinition[:len(submetricDefinition)-1]) + if err != nil { + return nil, err + } + return sm.Metric, nil +} + +func (me *MetricsEngine) markObserved(metric *stats.Metric) { + if !metric.Observed { + metric.Observed = true + me.ObservedMetrics[metric.Name] = metric + } +} + +func (me *MetricsEngine) initSubMetricsAndThresholds() error { + for metricName, thresholds := range me.options.Thresholds { + metric, err := me.getThresholdMetricOrSubmetric(metricName) + + if me.runtimeOptions.NoThresholds.Bool { + if err != nil { + me.logger.WithError(err).Warnf("Invalid metric '%s' in threshold definitions", metricName) + } + continue + } + + if err != nil { + return fmt.Errorf("invalid metric '%s' in threshold definitions: %w", metricName, err) + } + + metric.Thresholds = thresholds + me.metricsWithThresholds = append(me.metricsWithThresholds, metric) + + // Mark the metric (and the parent metric, if we're dealing with a + // submetric) as observed, so they are shown in the end-of-test summary, + // even if they don't have any metric samples during the test run + me.markObserved(metric) + if metric.Sub != nil { + me.markObserved(metric.Sub.Parent) + } + } + + // TODO: refactor out of here when https://github.com/grafana/k6/issues/1321 + // lands and there is a better way to enable a metric with a
tag + if me.options.SystemTags.Has(stats.TagExpectedResponse) { + _, err := me.getThresholdMetricOrSubmetric("http_req_duration{expected_response:true}") + if err != nil { + return err // shouldn't happen, but ¯\_(ツ)_/¯ + } + } + + return nil +} + +// EvaluateThresholds processes all of the thresholds. +// +// TODO: refactor, make private, optimize +func (me *MetricsEngine) EvaluateThresholds() (thresholdsTainted, shouldAbort bool) { + me.MetricsLock.Lock() + defer me.MetricsLock.Unlock() + + t := me.executionState.GetCurrentTestRunDuration() + + for _, m := range me.metricsWithThresholds { + if len(m.Thresholds.Thresholds) == 0 { + continue + } + m.Tainted = null.BoolFrom(false) + + me.logger.WithField("metric_name", m.Name).Debug("running thresholds") + succ, err := m.Thresholds.Run(m.Sink, t) + if err != nil { + me.logger.WithField("metric_name", m.Name).WithError(err).Error("Threshold error") + continue + } + if succ { + continue // threshold passed + } + me.logger.WithField("metric_name", m.Name).Debug("Thresholds failed") + m.Tainted = null.BoolFrom(true) + thresholdsTainted = true + if m.Thresholds.Abort { + shouldAbort = true + } + } + + return thresholdsTainted, shouldAbort +} diff --git a/metrics/engine/ingester.go b/metrics/engine/ingester.go new file mode 100644 index 00000000000..87bdceeadf2 --- /dev/null +++ b/metrics/engine/ingester.go @@ -0,0 +1,92 @@ +package engine + +import ( + "time" + + "github.com/sirupsen/logrus" + "go.k6.io/k6/output" +) + +const collectRate = 50 * time.Millisecond + +var _ output.Output = &outputIngester{} + +// outputIngester implements the output.Output interface and can be used to +// "feed" the MetricsEngine data from a `k6 run` test run. +type outputIngester struct { + output.SampleBuffer + logger logrus.FieldLogger + + metricsEngine *MetricsEngine + periodicFlusher *output.PeriodicFlusher +} + +// Description returns a human-readable description of the output. +func (oi *outputIngester) Description() string { + return "engine" +} + +// Start starts the ingester by initializing a new output.PeriodicFlusher. +func (oi *outputIngester) Start() error { + oi.logger.Debug("Starting...") + + pf, err := output.NewPeriodicFlusher(collectRate, oi.flushMetrics) + if err != nil { + return err + } + oi.logger.Debug("Started!") + oi.periodicFlusher = pf + + return nil +} + +// Stop flushes any remaining metrics and stops the goroutine. +func (oi *outputIngester) Stop() error { + oi.logger.Debug("Stopping...") + defer oi.logger.Debug("Stopped!") + oi.periodicFlusher.Stop() + return nil +} + +// flushMetrics writes the buffered samples to the MetricsEngine. +func (oi *outputIngester) flushMetrics() { + sampleContainers := oi.GetBufferedSamples() + if len(sampleContainers) == 0 { + return + } + + oi.metricsEngine.MetricsLock.Lock() + defer oi.metricsEngine.MetricsLock.Unlock() + + // TODO: split metric samples into buckets with a *stats.Metric key; this will + // allow us to have a per-bucket lock, instead of one global one, and it + // will allow us to split apart the metric Name and Type from its Sink and + // Observed fields... + // + // And, to further optimize things, if every metric (and sub-metric) had a + // sequential integer ID, we would be able to use a slice for these buckets + // and eliminate the map lookups altogether!
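// A rough shape of that bucketing idea (editor's sketch, not part of this
// change): group the samples per *stats.Metric first, so each bucket could
// later be flushed under its own, narrower lock:
//
//	buckets := make(map[*stats.Metric][]stats.Sample)
//	for _, sc := range sampleContainers {
//		for _, sample := range sc.GetSamples() {
//			buckets[sample.Metric] = append(buckets[sample.Metric], sample)
//		}
//	}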
+ + for _, sampleContainer := range sampleContainers { + samples := sampleContainer.GetSamples() + + if len(samples) == 0 { + continue + } + + for _, sample := range samples { + m := sample.Metric // this should have come from the Registry, no need to look it up + oi.metricsEngine.markObserved(m) // mark it as observed so it shows in the end-of-test summary + m.Sink.Add(sample) // finally, add its value to its own sink + + // and do the same for any submetrics that match the metric sample + for _, sm := range m.Submetrics { + if !sample.Tags.Contains(sm.Tags) { + continue + } + oi.metricsEngine.markObserved(sm.Metric) + sm.Metric.Sink.Add(sample) + } + } + } +} diff --git a/metrics/package.go b/metrics/package.go new file mode 100644 index 00000000000..ff788918ffc --- /dev/null +++ b/metrics/package.go @@ -0,0 +1,10 @@ +// Package metrics contains various k6 components that deal with metrics and +// thresholds. +package metrics + +// TODO: move most things from the stats/ package here + +// TODO: maybe even move the outputs to a sub-folder here? It may be worth it to +// do a new Output v2 implementation that uses channels and is more usable and +// easier to write? This way the old extensions can still work for a while, with +// an adapter and a deprecation notice diff --git a/lib/metrics/registry.go b/metrics/registry.go similarity index 92% rename from lib/metrics/registry.go rename to metrics/registry.go index fc0a8c17a42..6f7f1be1d23 100644 --- a/lib/metrics/registry.go +++ b/metrics/registry.go @@ -85,3 +85,9 @@ func (r *Registry) MustNewMetric(name string, typ stats.MetricType, t ...stats.V } return m } + +// Get returns the Metric with the given name. If that metric doesn't exist, +// Get() will return a nil value. +func (r *Registry) Get(name string) *stats.Metric { + return r.metrics[name] +} diff --git a/lib/metrics/registry_test.go b/metrics/registry_test.go similarity index 100% rename from lib/metrics/registry_test.go rename to metrics/registry_test.go diff --git a/output/cloud/data.go b/output/cloud/data.go index c72413267ae..1178ddaf8fc 100644 --- a/output/cloud/data.go +++ b/output/cloud/data.go @@ -27,8 +27,8 @@ import ( "sort" "time" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/netext/httpext" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/output/cloud/data_test.go b/output/cloud/data_test.go index ebfbcb3e3b5..8e614dad2ac 100644 --- a/output/cloud/data_test.go +++ b/output/cloud/data_test.go @@ -31,8 +31,8 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/netext/httpext" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/output/cloud/output.go b/output/cloud/output.go index 9eed64ebd05..2da3baadb21 100644 --- a/output/cloud/output.go +++ b/output/cloud/output.go @@ -37,9 +37,9 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/consts" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/netext" "go.k6.io/k6/lib/netext/httpext" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) diff --git a/output/cloud/output_test.go b/output/cloud/output_test.go index 558aeb8566f..17637397b23 100644 --- a/output/cloud/output_test.go +++ b/output/cloud/output_test.go @@ -43,12 +43,12 @@ import ( "go.k6.io/k6/cloudapi" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" "go.k6.io/k6/lib/netext" "go.k6.io/k6/lib/netext/httpext" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/httpmultibin" "go.k6.io/k6/lib/types" + "go.k6.io/k6/metrics" "go.k6.io/k6/output" "go.k6.io/k6/stats" ) diff --git
a/output/json/json_easyjson.go b/output/json/json_easyjson.go index a854533d0dd..85210d84a77 100644 --- a/output/json/json_easyjson.go +++ b/output/json/json_easyjson.go @@ -311,8 +311,6 @@ func easyjson42239ddeDecodeGoK6IoK6Stats(in *jlexer.Lexer, out *stats.Metric) { } in.Delim(']') } - case "sub": - easyjson42239ddeDecodeGoK6IoK6Stats1(in, &out.Sub) default: in.SkipRecursive() } @@ -372,11 +370,6 @@ func easyjson42239ddeEncodeGoK6IoK6Stats(out *jwriter.Writer, in stats.Metric) { out.RawByte(']') } } - if true { - const prefix string = ",\"sub\":" - out.RawString(prefix) - easyjson42239ddeEncodeGoK6IoK6Stats1(out, in.Sub) - } out.RawByte('}') } func easyjson42239ddeDecodeGoK6IoK6Stats1(in *jlexer.Lexer, out *stats.Submetric) { @@ -400,8 +393,6 @@ func easyjson42239ddeDecodeGoK6IoK6Stats1(in *jlexer.Lexer, out *stats.Submetric switch key { case "name": out.Name = string(in.String()) - case "parent": - out.Parent = string(in.String()) case "suffix": out.Suffix = string(in.String()) case "tags": @@ -435,11 +426,6 @@ func easyjson42239ddeEncodeGoK6IoK6Stats1(out *jwriter.Writer, in stats.Submetri out.RawString(prefix[1:]) out.String(string(in.Name)) } - { - const prefix string = ",\"parent\":" - out.RawString(prefix) - out.String(string(in.Parent)) - } { const prefix string = ",\"suffix\":" out.RawString(prefix) diff --git a/output/json/json_test.go b/output/json/json_test.go index 413640fb23e..9f0cf9f68a4 100644 --- a/output/json/json_test.go +++ b/output/json/json_test.go @@ -72,10 +72,10 @@ func generateTestMetricSamples(t testing.TB) ([]stats.SampleContainer, func(io.R stats.Sample{Time: time3, Metric: metric2, Value: float64(5), Tags: stats.NewSampleTags(map[string]string{"tag3": "val3"})}, } expected := []string{ - `{"type":"Metric","data":{"name":"my_metric1","type":"gauge","contains":"default","tainted":null,"thresholds":["rate<0.01","p(99)<250"],"submetrics":null,"sub":{"name":"","parent":"","suffix":"","tags":null}},"metric":"my_metric1"}`, + `{"type":"Metric","data":{"name":"my_metric1","type":"gauge","contains":"default","tainted":null,"thresholds":["rate<0.01","p(99)<250"],"submetrics":null},"metric":"my_metric1"}`, `{"type":"Point","data":{"time":"2021-02-24T13:37:10Z","value":1,"tags":{"tag1":"val1"}},"metric":"my_metric1"}`, `{"type":"Point","data":{"time":"2021-02-24T13:37:10Z","value":2,"tags":{"tag2":"val2"}},"metric":"my_metric1"}`, - `{"type":"Metric","data":{"name":"my_metric2","type":"counter","contains":"data","tainted":null,"thresholds":[],"submetrics":null,"sub":{"name":"","parent":"","suffix":"","tags":null}},"metric":"my_metric2"}`, + `{"type":"Metric","data":{"name":"my_metric2","type":"counter","contains":"data","tainted":null,"thresholds":[],"submetrics":null},"metric":"my_metric2"}`, `{"type":"Point","data":{"time":"2021-02-24T13:37:20Z","value":3,"tags":{"key":"val"}},"metric":"my_metric2"}`, `{"type":"Point","data":{"time":"2021-02-24T13:37:20Z","value":4,"tags":{"key":"val"}},"metric":"my_metric1"}`, `{"type":"Point","data":{"time":"2021-02-24T13:37:30Z","value":5,"tags":{"tag3":"val3"}},"metric":"my_metric2"}`, diff --git a/output/manager.go b/output/manager.go new file mode 100644 index 00000000000..fdb88743e19 --- /dev/null +++ b/output/manager.go @@ -0,0 +1,81 @@ +package output + +import ( + "github.com/sirupsen/logrus" + "go.k6.io/k6/lib" + "go.k6.io/k6/stats" +) + +// Manager can be used to manage multiple outputs at the same time. 
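// A usage sketch (editor's illustration; the outputs slice, logger, and the
// abort callback are placeholders):
//
//	m := output.NewManager(outputs, logger, func(err error) { /* abort the test run */ })
//	if err := m.StartOutputs(); err != nil {
//		return err // a failed Start() has already stopped the preceding outputs
//	}
//	defer m.StopOutputs()
//	// while the test runs: m.AddMetricSamples(sampleContainers)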
+type Manager struct { + outputs []Output + logger logrus.FieldLogger + + testStopCallback func(error) +} + +// NewManager returns a new manager for the given outputs. +func NewManager(outputs []Output, logger logrus.FieldLogger, testStopCallback func(error)) *Manager { + return &Manager{ + outputs: outputs, + logger: logger.WithField("component", "output-manager"), + testStopCallback: testStopCallback, + } +} + +// StartOutputs spins up all configured outputs. If some output fails to start, +// it stops the already started ones. This may take some time, since some +// outputs make initial network requests to set up whatever remote services are +// going to listen to them. +func (om *Manager) StartOutputs() error { + om.logger.Debugf("Starting %d outputs...", len(om.outputs)) + for i, out := range om.outputs { + if stopOut, ok := out.(WithTestRunStop); ok { + stopOut.SetTestRunStopCallback(om.testStopCallback) + } + + if err := out.Start(); err != nil { + om.stopOutputs(i) + return err + } + } + return nil +} + +// StopOutputs stops all configured outputs. +func (om *Manager) StopOutputs() { + om.stopOutputs(len(om.outputs)) +} + +func (om *Manager) stopOutputs(upToID int) { + om.logger.Debugf("Stopping %d outputs...", upToID) + for i := 0; i < upToID; i++ { + if err := om.outputs[i].Stop(); err != nil { + om.logger.WithError(err).Errorf("Stopping output %d failed", i) + } + } +} + +// SetRunStatus checks which outputs implement the WithRunStatusUpdates +// interface and sets the provided RunStatus to them. +func (om *Manager) SetRunStatus(status lib.RunStatus) { + for _, out := range om.outputs { + if statUpdOut, ok := out.(WithRunStatusUpdates); ok { + statUpdOut.SetRunStatus(status) + } + } +} + +// AddMetricSamples is a temporary method to make the Manager usable in the +// current Engine. It needs to be replaced with the full metric pump. +// +// TODO: refactor +func (om *Manager) AddMetricSamples(sampleContainers []stats.SampleContainer) { + if len(sampleContainers) == 0 { + return + } + + for _, out := range om.outputs { + out.AddMetricSamples(sampleContainers) + } +} diff --git a/output/types.go b/output/types.go index 8aff0023fc8..42227e9e8f1 100644 --- a/output/types.go +++ b/output/types.go @@ -18,6 +18,9 @@ * */ +// Package output contains the interfaces that k6 outputs (and output +// extensions) have to implement, as well as some helpers to make their +// implementation and management easier. package output import ( @@ -29,7 +32,7 @@ import ( "github.com/spf13/afero" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/metrics" "go.k6.io/k6/stats" ) @@ -50,6 +53,8 @@ type Params struct { ExecutionPlan []lib.ExecutionStep } +// TODO: make v2 with buffered channels? + // An Output abstracts the process of funneling samples to an external storage // backend, such as a file or something like an InfluxDB instance. // diff --git a/stats/stats.go b/stats/stats.go index 5b34f71af1c..22f4866db0b 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -233,7 +233,7 @@ func (st *SampleTags) Contains(other *SampleTags) bool { } for k, v := range other.tags { - if st.tags[k] != v { + if myv, ok := st.tags[k]; !ok || myv != v { return false } } @@ -438,16 +438,22 @@ func PushIfNotDone(ctx context.Context, output chan<- SampleContainer, sample Sa return true } +// TODO: move to the metrics/ package + // A Metric defines the shape of a set of data. 
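// Submetrics are now created from their parent Metric via the AddSubmetric
// method further down in this file, which replaces the old NewSubmetric
// helper. For example (editor's sketch; the metric name and tags are
// hypothetical):
//
//	m := New("my_metric", Trend)
//	sm, _ := m.AddSubmetric("status:200")  // sm.Name == "my_metric{status:200}"
//	_ = sm.Parent == m                     // submetrics link back to their parent
//	_, err := m.AddSubmetric("status:200") // duplicate criteria return an error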
type Metric struct { - Name string `json:"name"` - Type MetricType `json:"type"` - Contains ValueType `json:"contains"` + Name string `json:"name"` + Type MetricType `json:"type"` + Contains ValueType `json:"contains"` + + // TODO: decouple the metrics from the sinks and thresholds... have them + // linked, but not in the same struct? Tainted null.Bool `json:"tainted"` Thresholds Thresholds `json:"thresholds"` Submetrics []*Submetric `json:"submetrics"` - Sub Submetric `json:"sub,omitempty"` + Sub *Submetric `json:"-"` Sink Sink `json:"-"` + Observed bool `json:"-"` } // Sample samples the metric at the given time, with the provided tags and value @@ -484,37 +490,62 @@ func New(name string, typ MetricType, t ...ValueType) *Metric { // A Submetric represents a filtered dataset based on a parent metric. type Submetric struct { Name string `json:"name"` - Parent string `json:"parent"` - Suffix string `json:"suffix"` + Suffix string `json:"suffix"` // TODO: rename? Tags *SampleTags `json:"tags"` - Metric *Metric `json:"-"` + + Metric *Metric `json:"-"` + Parent *Metric `json:"-"` } -// Creates a submetric from a name. -func NewSubmetric(name string) (parentName string, sm *Submetric) { - parts := strings.SplitN(strings.TrimSuffix(name, "}"), "{", 2) - if len(parts) == 1 { - return parts[0], &Submetric{Name: name} +// AddSubmetric creates a new submetric from the key:value threshold definition +// and adds it to the metric's submetrics list. +func (m *Metric) AddSubmetric(keyValues string) (*Submetric, error) { + keyValues = strings.TrimSpace(keyValues) + if len(keyValues) == 0 { + return nil, fmt.Errorf("submetric criteria for metric '%s' cannot be empty", m.Name) } - - kvs := strings.Split(parts[1], ",") - tags := make(map[string]string, len(kvs)) + kvs := strings.Split(keyValues, ",") + rawTags := make(map[string]string, len(kvs)) for _, kv := range kvs { if kv == "" { continue } parts := strings.SplitN(kv, ":", 2) - key := strings.TrimSpace(strings.Trim(parts[0], `"'`)) + key := strings.Trim(strings.TrimSpace(parts[0]), `"'`) if len(parts) != 2 { - tags[key] = "" + rawTags[key] = "" continue } - value := strings.TrimSpace(strings.Trim(parts[1], `"'`)) - tags[key] = value + value := strings.Trim(strings.TrimSpace(parts[1]), `"'`) + rawTags[key] = value } - return parts[0], &Submetric{Name: name, Parent: parts[0], Suffix: parts[1], Tags: IntoSampleTags(&tags)} + + tags := IntoSampleTags(&rawTags) + + for _, sm := range m.Submetrics { + if sm.Tags.IsEqual(tags) { + return nil, fmt.Errorf( + "sub-metric with params '%s' already exists for metric %s: %s", + keyValues, m.Name, sm.Name, + ) + } + } + + subMetric := &Submetric{ + Name: m.Name + "{" + keyValues + "}", + Suffix: keyValues, + Tags: tags, + Parent: m, + } + subMetricMetric := New(subMetric.Name, m.Type, m.Contains) + subMetricMetric.Sub = subMetric // sigh + subMetric.Metric = subMetricMetric + + m.Submetrics = append(m.Submetrics, subMetric) + + return subMetric, nil } // parsePercentile is a helper function to parse and validate percentile notations diff --git a/stats/stats_test.go b/stats/stats_test.go index 8aeb9f21159..a206be7707c 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNew(t *testing.T) { @@ -52,33 +53,39 @@ func TestNew(t *testing.T) { } } -func TestNewSubmetric(t *testing.T) { +func TestAddSubmetric(t *testing.T) { t.Parallel() testdata := map[string]struct { - parent string - tags 
map[string]string + err bool + tags map[string]string }{ - "my_metric": {"my_metric", nil}, - "my_metric{}": {"my_metric", nil}, - "my_metric{a}": {"my_metric", map[string]string{"a": ""}}, - "my_metric{a:1}": {"my_metric", map[string]string{"a": "1"}}, - "my_metric{ a : 1 }": {"my_metric", map[string]string{"a": "1"}}, - "my_metric{a,b}": {"my_metric", map[string]string{"a": "", "b": ""}}, - "my_metric{a:1,b:2}": {"my_metric", map[string]string{"a": "1", "b": "2"}}, - "my_metric{ a : 1, b : 2 }": {"my_metric", map[string]string{"a": "1", "b": "2"}}, + "": {true, nil}, + " ": {true, nil}, + "a": {false, map[string]string{"a": ""}}, + "a:1": {false, map[string]string{"a": "1"}}, + " a : 1 ": {false, map[string]string{"a": "1"}}, + "a,b": {false, map[string]string{"a": "", "b": ""}}, + ` a:"",b: ''`: {false, map[string]string{"a": "", "b": ""}}, + `a:1,b:2`: {false, map[string]string{"a": "1", "b": "2"}}, + ` a : 1, b : 2 `: {false, map[string]string{"a": "1", "b": "2"}}, + `a : '1' , b : "2"`: {false, map[string]string{"a": "1", "b": "2"}}, + `" a" : ' 1' , b : "2 " `: {false, map[string]string{" a": " 1", "b": "2 "}}, //nolint:gocritic } - for name, data := range testdata { - name, data := name, data + for name, expected := range testdata { + name, expected := name, expected t.Run(name, func(t *testing.T) { t.Parallel() - parent, sm := NewSubmetric(name) - assert.Equal(t, data.parent, parent) - if data.tags != nil { - assert.EqualValues(t, data.tags, sm.Tags.tags) - } else { - assert.Nil(t, sm.Tags) + + m := New("metric", Trend) + sm, err := m.AddSubmetric(name) + if expected.err { + require.Error(t, err) + return } + require.NoError(t, err) + require.NotNil(t, sm) + assert.EqualValues(t, expected.tags, sm.Tags.tags) }) } } @@ -125,6 +132,7 @@ func TestSampleTags(t *testing.T) { assert.False(t, tags.IsEqual(IntoSampleTags(&map[string]string{"key1": "val1", "key2": "val3"}))) assert.True(t, tags.Contains(IntoSampleTags(&map[string]string{"key1": "val1"}))) assert.False(t, tags.Contains(IntoSampleTags(&map[string]string{"key3": "val1"}))) + assert.False(t, tags.Contains(IntoSampleTags(&map[string]string{"nonexistent_key": ""}))) assert.Equal(t, tagMap, tags.CloneTags()) assert.Nil(t, tags.json) // No cache
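The added `nonexistent_key` assertion above pins down the `SampleTags.Contains` fix in stats/stats.go: a key that is missing from the receiver must no longer match an empty-string value in the probe. A minimal standalone demonstration (editor's example; the tag names are hypothetical):

```go
package main

import (
	"fmt"

	"go.k6.io/k6/stats"
)

func main() {
	tags := stats.IntoSampleTags(&map[string]string{"key1": "val1"})
	probe := stats.IntoSampleTags(&map[string]string{"nonexistent_key": ""})

	// With the old `st.tags[k] != v` comparison, the missing key yielded ""
	// and compared equal to the probe's empty value, so Contains wrongly
	// returned true. The fixed `myv, ok := st.tags[k]; !ok || myv != v`
	// check returns false here.
	fmt.Println(tags.Contains(probe)) // false
}
```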