From 840605f954807bb81baafc2fef51ab569cdb31bc Mon Sep 17 00:00:00 2001 From: Kislay Kishore Date: Mon, 16 Dec 2024 12:25:02 +0530 Subject: [PATCH] Compute OTel metrics (#2744) * Compute OTel metrics GCSFuse metrics have been implemented in OTel. This means that one can now use the available OTel exporters to export these metrics. --- cmd/legacy_main.go | 3 + common/oc_metrics.go | 19 ++ common/otel_metrics.go | 139 ++++++++++ common/telemetry.go | 6 + internal/monitor/otelexporters.go | 16 ++ .../integration_tests/monitoring/prom_test.go | 250 ++++++++++++++++++ tools/integration_tests/run_e2e_tests.sh | 1 + tools/integration_tests/util/setup/setup.go | 11 + 8 files changed, 445 insertions(+) create mode 100644 common/otel_metrics.go create mode 100644 tools/integration_tests/monitoring/prom_test.go diff --git a/cmd/legacy_main.go b/cmd/legacy_main.go index 896f3472d5..68e64ac2fd 100644 --- a/cmd/legacy_main.go +++ b/cmd/legacy_main.go @@ -378,6 +378,9 @@ func Mount(newConfig *cfg.Config, bucketName, mountPoint string) (err error) { if cfg.IsMetricsEnabled(&newConfig.Metrics) { if newConfig.Metrics.EnableOtel { metricExporterShutdownFn = monitor.SetupOTelMetricExporters(ctx, newConfig) + if metricHandle, err = common.NewOTelMetrics(); err != nil { + metricHandle = common.NewNoopMetrics() + } } else { metricExporterShutdownFn = monitor.SetupOpenCensusExporters(newConfig) if metricHandle, err = common.NewOCMetrics(); err != nil { diff --git a/common/oc_metrics.go b/common/oc_metrics.go index 3e2cff1a2a..2bcf633615 100644 --- a/common/oc_metrics.go +++ b/common/oc_metrics.go @@ -23,6 +23,8 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" ) const ( @@ -72,6 +74,23 @@ func attrsToTags(attrs []MetricAttr) []tag.Mutator { } return mutators } + +func attrsToRecordOption(attrs []MetricAttr) []metric.RecordOption { + otelOptions := make([]metric.RecordOption, 0, len(attrs)) + for _, attr := range attrs { + otelOptions = append(otelOptions, metric.WithAttributes(attribute.String(attr.Key, attr.Value))) + } + return otelOptions +} + +func attrsToAddOption(attrs []MetricAttr) []metric.AddOption { + otelOptions := make([]metric.AddOption, 0, len(attrs)) + for _, attr := range attrs { + otelOptions = append(otelOptions, metric.WithAttributes(attribute.String(attr.Key, attr.Value))) + } + return otelOptions +} + func (o *ocMetrics) GCSReadBytesCount(ctx context.Context, inc int64, attrs []MetricAttr) { recordOCMetric(ctx, o.gcsReadBytesCount, inc, attrs, "GCS read bytes count") } diff --git a/common/otel_metrics.go b/common/otel_metrics.go new file mode 100644 index 0000000000..7a1c5eb9de --- /dev/null +++ b/common/otel_metrics.go @@ -0,0 +1,139 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package common + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" +) + +var ( + fsOpsMeter = otel.Meter("fs_op") + gcsMeter = otel.Meter("gcs") + fileCacheMeter = otel.Meter("file_cache") +) + +// otelMetrics maintains the list of all metrics computed in GCSFuse. +type otelMetrics struct { + fsOpsCount metric.Int64Counter + fsOpsErrorCount metric.Int64Counter + fsOpsLatency metric.Float64Histogram + + gcsReadCount metric.Int64Counter + gcsReadBytesCount metric.Int64Counter + gcsReaderCount metric.Int64Counter + gcsRequestCount metric.Int64Counter + gcsRequestLatency metric.Float64Histogram + gcsDownloadBytesCount metric.Int64Counter + + fileCacheReadCount metric.Int64Counter + fileCacheReadBytesCount metric.Int64Counter + fileCacheReadLatency metric.Float64Histogram +} + +func (o *otelMetrics) GCSReadBytesCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.gcsReadBytesCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) GCSReaderCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.gcsReaderCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) GCSRequestCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.gcsRequestCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) GCSRequestLatency(ctx context.Context, value float64, attrs []MetricAttr) { + o.gcsRequestLatency.Record(ctx, value, attrsToRecordOption(attrs)...) +} + +func (o *otelMetrics) GCSReadCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.gcsReadCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) GCSDownloadBytesCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.gcsDownloadBytesCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) OpsCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.fsOpsCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) OpsLatency(ctx context.Context, value float64, attrs []MetricAttr) { + o.fsOpsLatency.Record(ctx, value, attrsToRecordOption(attrs)...) +} + +func (o *otelMetrics) OpsErrorCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.fsOpsErrorCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) FileCacheReadCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.fileCacheReadCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) FileCacheReadBytesCount(ctx context.Context, inc int64, attrs []MetricAttr) { + o.fileCacheReadBytesCount.Add(ctx, inc, attrsToAddOption(attrs)...) +} + +func (o *otelMetrics) FileCacheReadLatency(ctx context.Context, value float64, attrs []MetricAttr) { + o.fileCacheReadLatency.Record(ctx, value, attrsToRecordOption(attrs)...) 
+} + +func NewOTelMetrics() (MetricHandle, error) { + fsOpsCount, err1 := fsOpsMeter.Int64Counter("fs/ops_count", metric.WithDescription("The number of ops processed by the file system.")) + fsOpsLatency, err2 := fsOpsMeter.Float64Histogram("fs/ops_latency", metric.WithDescription("The latency of a file system operation."), metric.WithUnit("us"), + defaultLatencyDistribution) + fsOpsErrorCount, err3 := fsOpsMeter.Int64Counter("fs/ops_error_count", metric.WithDescription("The number of errors generated by file system operation.")) + + gcsReadCount, err4 := gcsMeter.Int64Counter("gcs/read_count", metric.WithDescription("Specifies the number of gcs reads made along with type - Sequential/Random")) + gcsDownloadBytesCount, err5 := gcsMeter.Int64Counter("gcs/download_bytes_count", + metric.WithDescription("The cumulative number of bytes downloaded from GCS along with type - Sequential/Random"), + metric.WithUnit("By")) + gcsReadBytesCount, err6 := gcsMeter.Int64Counter("gcs/read_bytes_count", metric.WithDescription("The number of bytes read from GCS objects."), metric.WithUnit("By")) + gcsReaderCount, err7 := gcsMeter.Int64Counter("gcs/reader_count", metric.WithDescription("The number of GCS object readers opened or closed.")) + gcsRequestCount, err8 := gcsMeter.Int64Counter("gcs/request_count", metric.WithDescription("The cumulative number of GCS requests processed.")) + gcsRequestLatency, err9 := gcsMeter.Float64Histogram("gcs/request_latency", metric.WithDescription("The latency of a GCS request."), metric.WithUnit("ms")) + + fileCacheReadCount, err10 := fileCacheMeter.Int64Counter("file_cache/read_count", + metric.WithDescription("Specifies the number of read requests made via file cache along with type - Sequential/Random and cache hit - true/false")) + fileCacheReadBytesCount, err11 := fileCacheMeter.Int64Counter("file_cache/read_bytes_count", + metric.WithDescription("The cumulative number of bytes read from file cache along with read type - Sequential/Random"), + metric.WithUnit("By")) + fileCacheReadLatency, err12 := fileCacheMeter.Float64Histogram("file_cache/read_latencies", + metric.WithDescription("Latency of read from file cache along with cache hit - true/false"), + metric.WithUnit("us"), + defaultLatencyDistribution) + + if err := errors.Join(err1, err2, err3, err4, err5, err6, err7, err8, err9, err10, err11, err12); err != nil { + return nil, err + } + return &otelMetrics{ + fsOpsCount: fsOpsCount, + fsOpsErrorCount: fsOpsErrorCount, + fsOpsLatency: fsOpsLatency, + gcsReadCount: gcsReadCount, + gcsReadBytesCount: gcsReadBytesCount, + gcsReaderCount: gcsReaderCount, + gcsRequestCount: gcsRequestCount, + gcsRequestLatency: gcsRequestLatency, + gcsDownloadBytesCount: gcsDownloadBytesCount, + fileCacheReadCount: fileCacheReadCount, + fileCacheReadBytesCount: fileCacheReadBytesCount, + fileCacheReadLatency: fileCacheReadLatency, + }, nil +} diff --git a/common/telemetry.go b/common/telemetry.go index c23dfa2680..ac42554dcc 100644 --- a/common/telemetry.go +++ b/common/telemetry.go @@ -18,10 +18,16 @@ import ( "context" "errors" "fmt" + + "go.opentelemetry.io/otel/metric" ) type ShutdownFn func(ctx context.Context) error +// The default time buckets for latency metrics. +// The unit can however change for different units i.e. for one metric the unit could be microseconds and for another it could be milliseconds. 
+var defaultLatencyDistribution = metric.WithExplicitBucketBoundaries(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+
 // JoinShutdownFunc combines the provided shutdown functions into a single function.
 func JoinShutdownFunc(shutdownFns ...ShutdownFn) ShutdownFn {
 	return func(ctx context.Context) error {
diff --git a/internal/monitor/otelexporters.go b/internal/monitor/otelexporters.go
index eea69fb8ed..bf04364a90 100644
--- a/internal/monitor/otelexporters.go
+++ b/internal/monitor/otelexporters.go
@@ -39,6 +39,8 @@ import (
 const serviceName = "gcsfuse"
 const cloudMonitoringMetricPrefix = "custom.googleapis.com/gcsfuse/"
 
+var allowedMetricPrefixes = []string{"fs/", "gcs/", "file_cache/"}
+
 // SetupOTelMetricExporters sets up the metrics exporters
 func SetupOTelMetricExporters(ctx context.Context, c *cfg.Config) (shutdownFn common.ShutdownFn) {
 	shutdownFns := make([]common.ShutdownFn, 0)
@@ -59,6 +61,8 @@ func SetupOTelMetricExporters(ctx context.Context, c *cfg.Config) (shutdownFn co
 		options = append(options, metric.WithResource(res))
 	}
 
+	options = append(options, metric.WithView(dropDisallowedMetricsView))
+
 	meterProvider := metric.NewMeterProvider(options...)
 	shutdownFns = append(shutdownFns, meterProvider.Shutdown)
 
@@ -67,6 +71,18 @@ func SetupOTelMetricExporters(ctx context.Context, c *cfg.Config) (shutdownFn co
 	return common.JoinShutdownFunc(shutdownFns...)
 }
 
+// dropDisallowedMetricsView is an OTel View that drops the metrics that don't match the allowed prefixes.
+func dropDisallowedMetricsView(i metric.Instrument) (metric.Stream, bool) {
+	s := metric.Stream{Name: i.Name, Description: i.Description, Unit: i.Unit}
+	for _, prefix := range allowedMetricPrefixes {
+		if strings.HasPrefix(i.Name, prefix) {
+			return s, true
+		}
+	}
+	s.Aggregation = metric.AggregationDrop{}
+	return s, true
+}
+
 func setupCloudMonitoring(secs int64) ([]metric.Option, common.ShutdownFn) {
 	if secs <= 0 {
 		return nil, nil
diff --git a/tools/integration_tests/monitoring/prom_test.go b/tools/integration_tests/monitoring/prom_test.go
new file mode 100644
index 0000000000..ce462de8f7
--- /dev/null
+++ b/tools/integration_tests/monitoring/prom_test.go
@@ -0,0 +1,250 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package monitoring + +import ( + "context" + "fmt" + "net/http" + "os" + "os/exec" + "path" + "strings" + "testing" + + "github.com/googlecloudplatform/gcsfuse/v2/tools/integration_tests/util/client" + "github.com/googlecloudplatform/gcsfuse/v2/tools/integration_tests/util/mounting" + "github.com/googlecloudplatform/gcsfuse/v2/tools/integration_tests/util/setup" + "github.com/googlecloudplatform/gcsfuse/v2/tools/util" + promclient "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const ( + testBucket = "gcsfuse_monitoring_test_bucket" + portNonHNSRun = 9191 + portHNSRun = 9192 +) + +var prometheusPort = portNonHNSRun + +func isPortOpen(port int) bool { + c := exec.Command("lsof", "-t", fmt.Sprintf("-i:%d", port)) + output, _ := c.CombinedOutput() + return len(output) == 0 +} + +type PromTest struct { + suite.Suite + // Path to the gcsfuse binary. + gcsfusePath string + + // A temporary directory into which a file system may be mounted. Removed in + // TearDown. + mountPoint string + + enableOTEL bool +} + +// isHNSTestRun returns true if the bucket is an HNS bucket. +func isHNSTestRun(t *testing.T) bool { + storageClient, err := client.CreateStorageClient(context.Background()) + require.NoError(t, err, "error while creating storage client") + defer storageClient.Close() + return setup.IsHierarchicalBucket(context.Background(), storageClient) +} + +func (testSuite *PromTest) SetupSuite() { + setup.IgnoreTestIfIntegrationTestFlagIsNotSet(testSuite.T()) + if isHNSTestRun(testSuite.T()) { + // sets different Prometheus ports for HNS and non-HNS presubmit runs. + // This ensures that there is no port contention if both HNS and non-HNS test runs are happening simultaneously. 
+ prometheusPort = portHNSRun + } + + err := setup.SetUpTestDir() + require.NoErrorf(testSuite.T(), err, "error while building GCSFuse: %p", err) +} + +func (testSuite *PromTest) SetupTest() { + var err error + testSuite.gcsfusePath = setup.BinFile() + testSuite.mountPoint, err = os.MkdirTemp("", "gcsfuse_monitoring_tests") + require.NoError(testSuite.T(), err) + + setup.SetLogFile(fmt.Sprintf("%s%s.txt", "/tmp/gcsfuse_monitoring_test_", strings.ReplaceAll(testSuite.T().Name(), "/", "_"))) + err = testSuite.mount(testBucket) + require.NoError(testSuite.T(), err) +} + +func (testSuite *PromTest) TearDownTest() { + if err := util.Unmount(testSuite.mountPoint); err != nil { + fmt.Fprintf(os.Stderr, "Warning: unmount failed: %v\n", err) + } + require.True(testSuite.T(), isPortOpen(prometheusPort)) + + err := os.Remove(testSuite.mountPoint) + assert.NoError(testSuite.T(), err) +} + +func (testSuite *PromTest) TearDownSuite() { + os.RemoveAll(setup.TestDir()) +} + +func (testSuite *PromTest) mount(bucketName string) error { + testSuite.T().Helper() + if portAvailable := isPortOpen(prometheusPort); !portAvailable { + require.Failf(testSuite.T(), "prometheus port is not available.", "port: %d", int64(prometheusPort)) + } + cacheDir, err := os.MkdirTemp("", "gcsfuse-cache") + require.NoError(testSuite.T(), err) + testSuite.T().Cleanup(func() { _ = os.RemoveAll(cacheDir) }) + + flags := []string{fmt.Sprintf("--prometheus-port=%d", prometheusPort), "--cache-dir", cacheDir} + if testSuite.enableOTEL { + flags = append(flags, "--enable-otel=true") + } else { + flags = append(flags, "--enable-otel=false") + } + args := append(flags, bucketName, testSuite.mountPoint) + + if err := mounting.MountGcsfuse(testSuite.gcsfusePath, args); err != nil { + return err + } + return nil +} + +func parsePromFormat(testSuite *PromTest) (map[string]*promclient.MetricFamily, error) { + testSuite.T().Helper() + + resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", prometheusPort)) + require.NoError(testSuite.T(), err) + var parser expfmt.TextParser + return parser.TextToMetricFamilies(resp.Body) +} + +// assertNonZeroCountMetric asserts that the specified count metric is present and is positive in the Prometheus export +func assertNonZeroCountMetric(testSuite *PromTest, metricName, labelName, labelValue string) { + testSuite.T().Helper() + mf, err := parsePromFormat(testSuite) + require.NoError(testSuite.T(), err) + for k, v := range mf { + if k != metricName || *v.Type != promclient.MetricType_COUNTER { + continue + } + for _, m := range v.Metric { + if *m.Counter.Value <= 0 { + continue + } + if labelName == "" { + return + } + for _, l := range m.GetLabel() { + if *l.Name == labelName && *l.Value == labelValue { + return + } + } + } + + } + assert.Fail(testSuite.T(), "Didn't find the metric with name: %s, labelName: %s and labelValue: %s", metricName, labelName, labelValue) +} + +// assertNonZeroHistogramMetric asserts that the specified histogram metric is present and is positive for at least one of the buckets in the Prometheus export. 
+func assertNonZeroHistogramMetric(testSuite *PromTest, metricName, labelName, labelValue string) { + testSuite.T().Helper() + mf, err := parsePromFormat(testSuite) + require.NoError(testSuite.T(), err) + + for k, v := range mf { + if k != metricName || *v.Type != promclient.MetricType_HISTOGRAM { + continue + } + for _, m := range v.Metric { + for _, bkt := range m.GetHistogram().Bucket { + if bkt.CumulativeCount == nil || *bkt.CumulativeCount == 0 { + continue + } + if labelName == "" { + return + } + for _, l := range m.GetLabel() { + if *l.Name == labelName && *l.Value == labelValue { + return + } + } + } + } + } +} + +func (testSuite *PromTest) TestStatMetrics() { + _, err := os.Stat(path.Join(testSuite.mountPoint, "hello/hello.txt")) + + require.NoError(testSuite.T(), err) + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "LookUpInode") + assertNonZeroHistogramMetric(testSuite, "fs_ops_latency", "fs_op", "LookUpInode") + assertNonZeroCountMetric(testSuite, "gcs_request_count", "gcs_method", "StatObject") + assertNonZeroHistogramMetric(testSuite, "gcs_request_latencies", "gcs_method", "StatObject") +} + +func (testSuite *PromTest) TestFsOpsErrorMetrics() { + _, err := os.Stat(path.Join(testSuite.mountPoint, "non_existent_path.txt")) + require.Error(testSuite.T(), err) + + assertNonZeroCountMetric(testSuite, "fs_ops_error_count", "fs_op", "LookUpInode") + assertNonZeroHistogramMetric(testSuite, "fs_ops_latency", "fs_op", "LookUpInode") +} + +func (testSuite *PromTest) TestListMetrics() { + _, err := os.ReadDir(path.Join(testSuite.mountPoint, "hello")) + + require.NoError(testSuite.T(), err) + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "ReadDir") + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "OpenDir") + assertNonZeroCountMetric(testSuite, "gcs_request_count", "gcs_method", "ListObjects") + assertNonZeroHistogramMetric(testSuite, "gcs_request_latencies", "gcs_method", "ListObjects") +} + +func (testSuite *PromTest) TestReadMetrics() { + _, err := os.ReadFile(path.Join(testSuite.mountPoint, "hello/hello.txt")) + + require.NoError(testSuite.T(), err) + assertNonZeroCountMetric(testSuite, "file_cache_read_count", "cache_hit", "false") + assertNonZeroCountMetric(testSuite, "file_cache_read_count", "read_type", "Sequential") + assertNonZeroCountMetric(testSuite, "file_cache_read_bytes_count", "read_type", "Sequential") + assertNonZeroHistogramMetric(testSuite, "file_cache_read_latencies", "cache_hit", "false") + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "OpenFile") + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "ReadFile") + assertNonZeroCountMetric(testSuite, "fs_ops_count", "fs_op", "ReadFile") + assertNonZeroCountMetric(testSuite, "gcs_request_count", "gcs_method", "NewReader") + assertNonZeroCountMetric(testSuite, "gcs_reader_count", "io_method", "opened") + assertNonZeroCountMetric(testSuite, "gcs_reader_count", "io_method", "closed") + assertNonZeroCountMetric(testSuite, "gcs_read_count", "read_type", "Sequential") + assertNonZeroCountMetric(testSuite, "gcs_download_bytes_count", "", "") + assertNonZeroCountMetric(testSuite, "gcs_read_bytes_count", "", "") + assertNonZeroHistogramMetric(testSuite, "gcs_request_latencies", "gcs_method", "NewReader") +} + +func TestPromOCSuite(t *testing.T) { + suite.Run(t, &PromTest{enableOTEL: false}) +} + +func TestPromOTELSuite(t *testing.T) { + suite.Run(t, &PromTest{enableOTEL: true}) +} diff --git a/tools/integration_tests/run_e2e_tests.sh 
b/tools/integration_tests/run_e2e_tests.sh
index 6d460b8e7a..b06a08ce9d 100755
--- a/tools/integration_tests/run_e2e_tests.sh
+++ b/tools/integration_tests/run_e2e_tests.sh
@@ -64,6 +64,7 @@ echo "Setting the integration test timeout to: $INTEGRATION_TEST_TIMEOUT"
 readonly RANDOM_STRING_LENGTH=5
 # Test directory arrays
 TEST_DIR_PARALLEL=(
+  "monitoring"
   "local_file"
   "log_rotation"
   "mounting"
diff --git a/tools/integration_tests/util/setup/setup.go b/tools/integration_tests/util/setup/setup.go
index 3fedf1ad04..66c919ce87 100644
--- a/tools/integration_tests/util/setup/setup.go
+++ b/tools/integration_tests/util/setup/setup.go
@@ -273,6 +273,17 @@ func IgnoreTestIfIntegrationTestFlagIsSet(t *testing.T) {
 	}
 }
 
+// IgnoreTestIfIntegrationTestFlagIsNotSet skips the test if the --integrationTest flag is not set.
+// Packages that use TestMain typically call os.Exit() to skip the whole run;
+// for tests without a TestMain, this helper provides the equivalent per-test skip.
+func IgnoreTestIfIntegrationTestFlagIsNotSet(t *testing.T) {
+	flag.Parse()
+
+	if !*integrationTest {
+		t.SkipNow()
+	}
+}
+
 func ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() {
 	ParseSetUpFlags()
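Note for reviewers (editorial, not part of the patch): the sketch below shows roughly how the pieces introduced here fit together from a caller's point of view, namely building the OTel-backed handle via common.NewOTelMetrics(), falling back to common.NewNoopMetrics() on error exactly as the cmd/legacy_main.go hunk does, and then recording counts and latencies through the MetricHandle. The import path, the "fs_op"/"LookUpInode" attribute pair, and the timed operation are illustrative assumptions; only the constructor names, the MetricHandle method signatures, and the MetricAttr shape are taken from the patch itself.

// Illustrative sketch only; values marked as examples are assumptions.
package main

import (
	"context"
	"log"
	"time"

	// Import path inferred from the module path used elsewhere in this patch.
	"github.com/googlecloudplatform/gcsfuse/v2/common"
)

func main() {
	ctx := context.Background()

	// Prefer the OTel-backed handle; fall back to the no-op handle so callers
	// never need a nil check (mirrors the Mount() change above).
	metricHandle, err := common.NewOTelMetrics()
	if err != nil {
		log.Printf("falling back to no-op metrics: %v", err)
		metricHandle = common.NewNoopMetrics()
	}

	// Record one hypothetical file-system op and its latency in microseconds
	// (fs/ops_latency is declared with unit "us" in otel_metrics.go). The
	// "fs_op"/"LookUpInode" attribute pair is only an example value.
	start := time.Now()
	// ... perform the operation being measured ...
	attrs := []common.MetricAttr{{Key: "fs_op", Value: "LookUpInode"}}
	metricHandle.OpsCount(ctx, 1, attrs)
	metricHandle.OpsLatency(ctx, float64(time.Since(start).Microseconds()), attrs)
}

When the binary is mounted with --enable-otel=true and a --prometheus-port, values recorded this way surface as fs_ops_count and fs_ops_latency in the Prometheus export, which is what prom_test.go asserts; instruments whose names fall outside the fs/, gcs/, and file_cache/ prefixes are dropped by the dropDisallowedMetricsView view before export.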