diff --git a/exporter/alertmanagerexporter/alertmanager_exporter_test.go b/exporter/alertmanagerexporter/alertmanager_exporter_test.go
index 991d14c04432..f80516145ae2 100644
--- a/exporter/alertmanagerexporter/alertmanager_exporter_test.go
+++ b/exporter/alertmanagerexporter/alertmanager_exporter_test.go
@@ -278,7 +278,7 @@ func TestAlertManagerTracesExporterNoErrors(t *testing.T) {
 type (
 	MockServer struct {
-		mockserver            *httptest.Server // this means MockServer aggreagates 'httptest.Server', but can it's more like inheritance in C++
+		mockserver            *httptest.Server // this means MockServer aggregates 'httptest.Server', but it's more like inheritance in C++
 		fooCalledSuccessfully bool // this is false by default
 	}
 )
diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go
index 9a1b4ab37d40..1247c6e399fa 100644
--- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go
+++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go
@@ -29,7 +29,7 @@ func createLogData(numberOfLogs int) plog.Logs {
 	logs := plog.NewLogs()
 	logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs
 	rl := logs.ResourceLogs().AppendEmpty()
-	rl.Resource().Attributes().PutStr("resouceKey", "resourceValue")
+	rl.Resource().Attributes().PutStr("resourceKey", "resourceValue")
 	rl.Resource().Attributes().PutStr(conventions.AttributeServiceName, "test-log-service-exporter")
 	rl.Resource().Attributes().PutStr(conventions.AttributeHostName, "test-host")
 	sl := rl.ScopeLogs().AppendEmpty()
diff --git a/exporter/alibabacloudlogserviceexporter/metrics_exporter.go b/exporter/alibabacloudlogserviceexporter/metrics_exporter.go
index cf1dbb241c59..3abe96b6d532 100644
--- a/exporter/alibabacloudlogserviceexporter/metrics_exporter.go
+++ b/exporter/alibabacloudlogserviceexporter/metrics_exporter.go
@@ -13,7 +13,7 @@ import (
 	"go.uber.org/zap"
 )
-// newMetricsExporter return a new LogSerice metrics exporter.
+// newMetricsExporter return a new LogService metrics exporter.
func newMetricsExporter(set exporter.Settings, cfg component.Config) (exporter.Metrics, error) { l := &logServiceMetricsSender{ logger: set.Logger, diff --git a/exporter/alibabacloudlogserviceexporter/testdata/logservice_log_data.json b/exporter/alibabacloudlogserviceexporter/testdata/logservice_log_data.json index 41e91951965e..305d527733e0 100644 --- a/exporter/alibabacloudlogserviceexporter/testdata/logservice_log_data.json +++ b/exporter/alibabacloudlogserviceexporter/testdata/logservice_log_data.json @@ -10,7 +10,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -64,7 +64,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -118,7 +118,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -172,7 +172,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -226,7 +226,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -280,7 +280,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -334,7 +334,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -388,7 +388,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -442,7 +442,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", diff --git a/exporter/alibabacloudlogserviceexporter/trace_exporter.go b/exporter/alibabacloudlogserviceexporter/trace_exporter.go index cfc685561ac7..90d8f7f9b69d 100644 --- a/exporter/alibabacloudlogserviceexporter/trace_exporter.go +++ b/exporter/alibabacloudlogserviceexporter/trace_exporter.go @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" ) -// newTracesExporter return a new LogSerice trace exporter. +// newTracesExporter return a new LogService trace exporter. func newTracesExporter(set exporter.Settings, cfg component.Config) (exporter.Traces, error) { l := &logServiceTraceSender{ logger: set.Logger, diff --git a/exporter/awscloudwatchlogsexporter/config.go b/exporter/awscloudwatchlogsexporter/config.go index 24cce8c0f9bd..b5a095c6bdf5 100644 --- a/exporter/awscloudwatchlogsexporter/config.go +++ b/exporter/awscloudwatchlogsexporter/config.go @@ -37,7 +37,7 @@ type Config struct { // Possible values are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653 LogRetention int64 `mapstructure:"log_retention"` - // Tags is the option to set tags for the CloudWatch Log Group. If specified, please add add at least 1 and at most 50 tags. Input is a string to string map like so: { 'key': 'value' } + // Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at least 1 and at most 50 tags. 
Input is a string to string map like so: { 'key': 'value' } // Keys must be between 1-128 characters and follow the regex pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]+)$ // Values must be between 1-256 characters and follow the regex pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$ Tags map[string]*string `mapstructure:"tags"` diff --git a/exporter/awsemfexporter/README.md b/exporter/awsemfexporter/README.md index 6eb5d1dd4ebf..77a06400341c 100644 --- a/exporter/awsemfexporter/README.md +++ b/exporter/awsemfexporter/README.md @@ -41,7 +41,7 @@ The following exporter configuration parameters are supported. | `role_arn` | IAM role to upload segments to a different account. | | | `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 | | `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup`. The default value is `ZeroAndSingleDimensionRollup`. Enabling feature gate `awsemf.nodimrollupdefault` will set default to `NoDimensionRollup`. |"ZeroAndSingleDimensionRollup" (Enable both zero dimension rollup and single dimension rollup)| -| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config onption- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` | +| `resource_to_telemetry_conversion` | "resource_to_telemetry_conversion" is the option for converting resource attributes to telemetry attributes. It has only one config option- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. | `enabled=false` | | `output_destination` | "output_destination" is an option to specify the EMFExporter output. Currently, two options are available. "cloudwatch" or "stdout" | `cloudwatch` | | `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` | | `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value "{\\"x\\":5,\\"y\\":6}" will be converted to a json object: ```{"x": 5, "y": 6}``` | [ ] | @@ -73,7 +73,7 @@ A metric descriptor section allows the schema of a metric to be overwritten befo | Name | Description | Default | | :---------------- | :--------------------------------------------------------------------- | ------- | | `metric_name` | The name of the metric to be overwritten. | | -| `unit` | The overwritten value of unit. The [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) contains a ful list of supported unit values. | | +| `unit` | The overwritten value of unit. The [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) contains a full list of supported unit values. | | | `overwrite` | `true` if the schema should be overwritten with the given specification, otherwise it will only be configured if empty. 
| false | diff --git a/exporter/awsemfexporter/config.go b/exporter/awsemfexporter/config.go index 7495277e1c2a..3ce05be0ee57 100644 --- a/exporter/awsemfexporter/config.go +++ b/exporter/awsemfexporter/config.go @@ -73,7 +73,7 @@ type Config struct { // Note that at the moment in order to use this feature the value "kubernetes" must also be added to the ParseJSONEncodedAttributeValues array in order to be used EKSFargateContainerInsightsEnabled bool `mapstructure:"eks_fargate_container_insights_enabled"` - // ResourceToTelemetrySettings is an option for converting resource attrihutes to telemetry attributes. + // ResourceToTelemetrySettings is an option for converting resource attributes to telemetry attributes. // "Enabled" - A boolean field to enable/disable this option. Default is `false`. // If enabled, all the resource attributes will be converted to metric labels by default. ResourceToTelemetrySettings resourcetotelemetry.Settings `mapstructure:"resource_to_telemetry_conversion"` @@ -124,7 +124,7 @@ func (config *Config) Validate() error { if _, ok := eMFSupportedUnits[descriptor.Unit]; ok { validDescriptors = append(validDescriptors, descriptor) } else { - config.logger.Warn("Dropped unsupported metric desctriptor.", zap.String("unit", descriptor.Unit)) + config.logger.Warn("Dropped unsupported metric descriptor.", zap.String("unit", descriptor.Unit)) } } config.MetricDescriptors = validDescriptors diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 36d16b62de88..eb6ccc2b87b8 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -567,7 +567,7 @@ func getDataPoints(pmd pmetric.Metric, metadata cWMetricMetadata, logger *zap.Lo // For summaries coming from the prometheus receiver, the sum and count are cumulative, whereas for summaries // coming from other sources, e.g. SDK, the sum and count are delta by being accumulated and reset periodically. // In order to ensure metrics are sent as deltas, we check the receiver attribute (which can be injected by - // attribute processor) from resource metrics. If it exists, and equals to prometheus, the sum and count will be + // attribute processor) from resource metrics. If it exists, and is equal to prometheus, the sum and count will be // converted. 
// For more information: https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/prometheusreceiver/DESIGN.md#summary metricMetadata.adjustToDelta = metadata.receiver == prometheusReceiver diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index b182011386b2..9de974c8156d 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -1968,7 +1968,7 @@ func TestCreateLabels(t *testing.T) { labels := createLabels(labelsMap, "") assert.Equal(t, expectedLabels, labels) - // With isntrumentation library name + // With instrumentation library name labels = createLabels(labelsMap, "cloudwatch-otel") expectedLabels[oTellibDimensionKey] = "cloudwatch-otel" assert.Equal(t, expectedLabels, labels) @@ -1977,7 +1977,7 @@ func TestCreateLabels(t *testing.T) { func TestGetDataPoints(t *testing.T) { logger := zap.NewNop() - normalDeltraMetricMetadata := generateDeltaMetricMetadata(false, "foo", false) + normalDeltaMetricMetadata := generateDeltaMetricMetadata(false, "foo", false) cumulativeDeltaMetricMetadata := generateDeltaMetricMetadata(true, "foo", false) testCases := []struct { @@ -1991,7 +1991,7 @@ func TestGetDataPoints(t *testing.T) { name: "Int gauge", isPrometheusMetrics: false, metric: generateTestGaugeMetric("foo", intValueType), - expectedDatapointSlice: numberDataPointSlice{normalDeltraMetricMetadata, pmetric.NumberDataPointSlice{}}, + expectedDatapointSlice: numberDataPointSlice{normalDeltaMetricMetadata, pmetric.NumberDataPointSlice{}}, expectedAttributes: map[string]any{"label1": "value1"}, }, { @@ -2019,7 +2019,7 @@ func TestGetDataPoints(t *testing.T) { name: "Summary from SDK", isPrometheusMetrics: false, metric: generateTestSummaryMetric("foo"), - expectedDatapointSlice: summaryDataPointSlice{normalDeltraMetricMetadata, pmetric.SummaryDataPointSlice{}}, + expectedDatapointSlice: summaryDataPointSlice{normalDeltaMetricMetadata, pmetric.SummaryDataPointSlice{}}, expectedAttributes: map[string]any{"label1": "value1"}, }, { diff --git a/exporter/awsemfexporter/emf_exporter_test.go b/exporter/awsemfexporter/emf_exporter_test.go index a031d3d2b160..f57b941891c7 100644 --- a/exporter/awsemfexporter/emf_exporter_test.go +++ b/exporter/awsemfexporter/emf_exporter_test.go @@ -71,7 +71,7 @@ func TestConsumeMetricsWithNaNValues(t *testing.T) { generateFunc func(string) pmetric.Metrics }{ { - "histograme-with-nan", + "histogram-with-nan", generateTestHistogramMetricWithNaNs, }, { "gauge-with-nan", @@ -110,7 +110,7 @@ func TestConsumeMetricsWithInfValues(t *testing.T) { generateFunc func(string) pmetric.Metrics }{ { - "histograme-with-inf", + "histogram-with-inf", generateTestHistogramMetricWithInfs, }, { "gauge-with-inf", diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index c72f9b53e993..35b2ffff3f67 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -454,7 +454,7 @@ func TestAddKubernetesWrapper(t *testing.T) { dockerObj := struct { ContainerID string `json:"container_id"` }{ - ContainerID: "Container mccontainter the third", + ContainerID: "Container mccontainer the third", } expectedCreatedObj := struct { ContainerName string `json:"container_name"` @@ -469,7 +469,7 @@ func TestAddKubernetesWrapper(t *testing.T) { } inputs := make(map[string]string) - inputs["container_id"] = "Container mccontainter the third" + inputs["container_id"] = "Container mccontainer 
the third" inputs["container"] = "container mccontainer" inputs["NodeName"] = "hosty de la host" inputs["PodId"] = "Le id de Pod" diff --git a/exporter/awskinesisexporter/README.md b/exporter/awskinesisexporter/README.md index 64edd9fc41ea..85cd75436f1e 100644 --- a/exporter/awskinesisexporter/README.md +++ b/exporter/awskinesisexporter/README.md @@ -13,7 +13,7 @@ The kinesis exporter currently exports dynamic encodings to the configured kinesis stream. -The exporter relies heavily on the kinesis.PutRecords api to reduce network I/O and and reduces records into smallest atomic representation +The exporter relies heavily on the kinesis.PutRecords api to reduce network I/O and reduces records into smallest atomic representation to avoid hitting the hard limits placed on Records (No greater than 1Mb). This producer will block until the operation is done to allow for retryable and queued data to help during high loads. diff --git a/exporter/awskinesisexporter/internal/batch/batch.go b/exporter/awskinesisexporter/internal/batch/batch.go index 2c3d716f991a..7a13754134ff 100644 --- a/exporter/awskinesisexporter/internal/batch/batch.go +++ b/exporter/awskinesisexporter/internal/batch/batch.go @@ -101,7 +101,7 @@ func (b *Batch) AddRecord(raw []byte, key string) error { return nil } -// Chunk breaks up the iternal queue into blocks that can be used +// Chunk breaks up the internal queue into blocks that can be used // to be written to he kinesis.PutRecords endpoint func (b *Batch) Chunk() (chunks [][]types.PutRecordsRequestEntry) { // Using local copies to avoid mutating internal data diff --git a/exporter/awskinesisexporter/internal/producer/batcher_test.go b/exporter/awskinesisexporter/internal/producer/batcher_test.go index 9fd08c869139..f9b61e0eee17 100644 --- a/exporter/awskinesisexporter/internal/producer/batcher_test.go +++ b/exporter/awskinesisexporter/internal/producer/batcher_test.go @@ -49,7 +49,7 @@ func HardFailedPutRecordsOperation(r *kinesis.PutRecordsInput) (*kinesis.PutReco &types.ResourceNotFoundException{Message: aws.String("testing incorrect kinesis configuration")} } -func TransiantPutRecordsOperation(recoverAfter int) func(_ *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { +func TransientPutRecordsOperation(recoverAfter int) func(_ *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { attempt := 0 return func(r *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { if attempt < recoverAfter { @@ -74,7 +74,7 @@ func TestBatchedExporter(t *testing.T) { }{ {name: "Successful put to kinesis", PutRecordsOP: SuccessfulPutRecordsOperation, shouldErr: false, isPermanent: false}, {name: "Invalid kinesis configuration", PutRecordsOP: HardFailedPutRecordsOperation, shouldErr: true, isPermanent: true}, - {name: "Test throttled kinesis operation", PutRecordsOP: TransiantPutRecordsOperation(2), shouldErr: true, isPermanent: false}, + {name: "Test throttled kinesis operation", PutRecordsOP: TransientPutRecordsOperation(2), shouldErr: true, isPermanent: false}, } bt := batch.New() diff --git a/exporter/awss3exporter/internal/upload/partition.go b/exporter/awss3exporter/internal/upload/partition.go index 86bdab5c6511..eef559637d79 100644 --- a/exporter/awss3exporter/internal/upload/partition.go +++ b/exporter/awss3exporter/internal/upload/partition.go @@ -37,7 +37,7 @@ type PartitionKeyBuilder struct { // body before uploaded. 
Compression configcompression.Type // UniqueKeyFunc allows for overwritting the default behavior of - // generating a new unique string to avoid collosions on file upload + // generating a new unique string to avoid collisions on file upload // across many different instances. // // TODO: Expose the ability to config additional UniqueKeyField via config @@ -81,7 +81,7 @@ func (pki *PartitionKeyBuilder) uniqueKey() string { } // This follows the original "uniqueness" algorithm - // to avoid collosions on file uploads across different nodes. + // to avoid collisions on file uploads across different nodes. const ( uniqueValues = 999999999 minOffset = 100000000 diff --git a/exporter/awss3exporter/internal/upload/partition_test.go b/exporter/awss3exporter/internal/upload/partition_test.go index 293fa58aa8c2..175388c7c6c7 100644 --- a/exporter/awss3exporter/internal/upload/partition_test.go +++ b/exporter/awss3exporter/internal/upload/partition_test.go @@ -82,14 +82,14 @@ func TestPartitionKeyInputsBucketPrefix(t *testing.T) { expect: "/year=2024/month=01/day=24/hour=06", }, { - name: "parition by minutes", + name: "partition by minutes", inputs: &PartitionKeyBuilder{ PartitionTruncation: "minute", }, expect: "/year=2024/month=01/day=24/hour=06/minute=40", }, { - name: "unknown partition trunction value", + name: "unknown partition truncation value", inputs: &PartitionKeyBuilder{ PartitionTruncation: "weekly", }, @@ -180,7 +180,7 @@ func TestPartitionKeyInputsUniqueKey(t *testing.T) { for i := 0; i < 500; i++ { uv := (&PartitionKeyBuilder{}).uniqueKey() _, ok := seen[uv] - assert.False(t, ok, "Must not have repeated parition key %q", uv) + assert.False(t, ok, "Must not have repeated partition key %q", uv) seen[uv] = struct{}{} } } diff --git a/exporter/awss3exporter/internal/upload/writer.go b/exporter/awss3exporter/internal/upload/writer.go index 282b1e96b881..d65544e5b4f4 100644 --- a/exporter/awss3exporter/internal/upload/writer.go +++ b/exporter/awss3exporter/internal/upload/writer.go @@ -63,7 +63,7 @@ func (sw *s3manager) Upload(ctx context.Context, data []byte) error { } func (sw *s3manager) contentBuffer(raw []byte) (*bytes.Buffer, error) { - //nolint: gocritic // Leaving this as a switch statemenet to make it easier to add more later compressions + //nolint: gocritic // Leaving this as a switch statement to make it easier to add more later compressions switch sw.builder.Compression { case configcompression.TypeGzip: content := bytes.NewBuffer(nil) diff --git a/exporter/awss3exporter/internal/upload/writer_test.go b/exporter/awss3exporter/internal/upload/writer_test.go index cb722933aa81..0bb322b9d8f5 100644 --- a/exporter/awss3exporter/internal/upload/writer_test.go +++ b/exporter/awss3exporter/internal/upload/writer_test.go @@ -142,7 +142,7 @@ func TestS3ManagerUpload(t *testing.T) { ) // Using a mocked virtual clock to fix the timestamp used - // to reduce the potential of flakey tests + // to reduce the potential of flaky tests mc := clock.NewMock(time.Date(2024, 0o1, 10, 10, 30, 40, 100, time.Local)) err := sm.Upload( diff --git a/exporter/awsxrayexporter/internal/translator/aws.go b/exporter/awsxrayexporter/internal/translator/aws.go index 9563b5a00ba0..e11bcb721e1c 100644 --- a/exporter/awsxrayexporter/internal/translator/aws.go +++ b/exporter/awsxrayexporter/internal/translator/aws.go @@ -124,9 +124,9 @@ func makeAws(attributes map[string]pcommon.Value, resource pcommon.Resource, log for key, value := range attributes { switch key { case conventionsv112.AttributeRPCMethod: - // 
Determinstically handled with if else above + // Deterministically handled with if else above case awsxray.AWSOperationAttribute: - // Determinstically handled with if else above + // Deterministically handled with if else above case awsxray.AWSAccountAttribute: if value.Type() != pcommon.ValueTypeEmpty { account = value.Str() diff --git a/exporter/awsxrayexporter/internal/translator/cause.go b/exporter/awsxrayexporter/internal/translator/cause.go index 4f6989b136e3..af1028b887c0 100644 --- a/exporter/awsxrayexporter/internal/translator/cause.go +++ b/exporter/awsxrayexporter/internal/translator/cause.go @@ -301,7 +301,7 @@ func fillJavaStacktrace(stacktrace string, exceptions []awsxray.Exception) []aws // when append causes `exceptions` to outgrow its existing // capacity, re-allocation will happen so the place // `exception` points to is no longer `exceptions[len(exceptions)-2]`, - // consequently, we can not write `exception.Cause = newException.ID` + // consequently, we cannot write `exception.Cause = newException.ID` // below. newException := &exceptions[len(exceptions)-1] exceptions[len(exceptions)-2].Cause = newException.ID @@ -398,7 +398,7 @@ func fillPythonStacktrace(stacktrace string, exceptions []awsxray.Exception) []a // when append causes `exceptions` to outgrow its existing // capacity, re-allocation will happen so the place // `exception` points to is no longer `exceptions[len(exceptions)-2]`, - // consequently, we can not write `exception.Cause = newException.ID` + // consequently, we cannot write `exception.Cause = newException.ID` // below. newException := &exceptions[len(exceptions)-1] exceptions[len(exceptions)-2].Cause = newException.ID diff --git a/exporter/awsxrayexporter/internal/translator/cause_test.go b/exporter/awsxrayexporter/internal/translator/cause_test.go index f6511d835aee..a24ad9f66056 100644 --- a/exporter/awsxrayexporter/internal/translator/cause_test.go +++ b/exporter/awsxrayexporter/internal/translator/cause_test.go @@ -1072,7 +1072,7 @@ func TestParseExceptionWithStacktraceNotJavaScript(t *testing.T) { assert.Equal(t, isRemote, *exceptions[0].Remote) } -func TestParseExceptionWithJavaScriptStactracekMalformedLines(t *testing.T) { +func TestParseExceptionWithJavaScriptStacktraceMalformedLines(t *testing.T) { exceptionType := "TypeError" message := "Cannot read property 'value' of null" // We ignore the exception type / message from the stacktrace diff --git a/exporter/awsxrayexporter/internal/translator/http.go b/exporter/awsxrayexporter/internal/translator/http.go index ccf865423d30..347f77f1cd36 100644 --- a/exporter/awsxrayexporter/internal/translator/http.go +++ b/exporter/awsxrayexporter/internal/translator/http.go @@ -147,7 +147,7 @@ func makeHTTP(span ptrace.Span) (map[string]pcommon.Value, *awsxray.HTTPData) { } func extractResponseSizeFromEvents(span ptrace.Span) int64 { - // Support insrumentation that sets response size in span or as an event. + // Support instrumentation that sets response size in span or as an event. size := extractResponseSizeFromAttributes(span.Attributes()) if size != 0 { return size diff --git a/exporter/awsxrayexporter/internal/translator/segment.go b/exporter/awsxrayexporter/internal/translator/segment.go index fb134dd7abf9..df94f4ed411e 100644 --- a/exporter/awsxrayexporter/internal/translator/segment.go +++ b/exporter/awsxrayexporter/internal/translator/segment.go @@ -739,7 +739,7 @@ func fixSegmentName(name string) string { return name } -// fixAnnotationKey removes any invalid characters from the annotaiton key. 
AWS X-Ray defines +// fixAnnotationKey removes any invalid characters from the annotation key. AWS X-Ray defines // the list of valid characters here: // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html func fixAnnotationKey(key string) string { diff --git a/exporter/azuredataexplorerexporter/README.md b/exporter/azuredataexplorerexporter/README.md index 6257b43e0f83..3f6311dc8922 100644 --- a/exporter/azuredataexplorerexporter/README.md +++ b/exporter/azuredataexplorerexporter/README.md @@ -30,7 +30,7 @@ One authentication method is required: - `application_key` (no default): The cluster secret corresponding to the client id. - `tenant_id` (no default): The tenant id where the application_id is referenced from. - Managed identity: - - `managed_identity_id` (no default): The managed identity id to authenticate with. Set to "system" for system-assigned managed identity. Set the MI client Id (GUID) for user-assigned managed identity. + - `managed_identity_id` (no default): The managed identity id to authenticate with. Set to "system" for system-assigned managed identity. Set the MI client ID (GUID) for user-assigned managed identity. - Default authentication: - `use_azure_auth` (default: false): Set to true to use the Azure [default authentication](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure). @@ -61,7 +61,7 @@ exporters: azuredataexplorer: # Kusto cluster uri cluster_uri: "https://CLUSTER.kusto.windows.net" - # Client Id + # Client ID application_id: "f80da32c-108c-415c-a19e-643f461a677a" # The client secret for the client application_key: "xx-xx-xx-xx" @@ -69,7 +69,7 @@ exporters: tenant_id: "21ff9e36-fbaa-43c8-98ba-00431ea10bc3" # A managed identity id to authenticate with. # Set to "system" for system-assigned managed identity. - # Set the MI client Id (GUID) for user-assigned managed identity. + # Set the MI client ID (GUID) for user-assigned managed identity. managed_identity_id: "z80da32c-108c-415c-a19e-643f461a677a" # Database for the logs db_name: "oteldb" @@ -214,16 +214,16 @@ with ( docstring = "Histo sum count processing function", folder = "UpdatePolicy @'[{ "IsEnabled": true, "Source": "RawMetricsData","Query": "ExtractHistoCountColumns()", "IsTransactional": false, "PropagateIngestionProperties": false}]' ``` -### Opentelemetry Exporter Helper Configurations +### OpenTelemetry Exporter Helper Configurations -The ADX exporter now includes support for Opentelemetry exporter helper configurations. This feature allows you to leverage the exporter helper capabilities(retries, timeout etc.) provided natively by Otel. Read more [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). +The ADX exporter now includes support for OpenTelemetry exporter helper configurations. This feature allows you to leverage the exporter helper capabilities(retries, timeout etc.) provided natively by Otel. Read more about the [exporterhelper](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). -Please note that this configuration is not enabled by default. To utilize the Opentelemetry exporter helper, you will need to add it manually to the configuration. +Please note that this configuration is not enabled by default. To utilize the OpenTelemetry exporter helper, you will need to add it manually to the configuration. 
#### Example Configuration ```yaml -# Example Opentelemetry Exporter Configuration +# Example OpenTelemetry Exporter Configuration timeout: 10s sending_queue: enabled: true diff --git a/exporter/azuredataexplorerexporter/testdata/config.yaml b/exporter/azuredataexplorerexporter/testdata/config.yaml index 33ca1d14793d..766c9ad1a511 100644 --- a/exporter/azuredataexplorerexporter/testdata/config.yaml +++ b/exporter/azuredataexplorerexporter/testdata/config.yaml @@ -1,7 +1,7 @@ azuredataexplorer: # Kusto cluster uri cluster_uri: "https://CLUSTER.kusto.windows.net" - # Client Id + # Client ID application_id: "f80da32c-108c-415c-a19e-643f461a677a" # The client secret for the client application_key: "xx-xx-xx-xx" @@ -20,7 +20,7 @@ azuredataexplorer: azuredataexplorer/2: # Kusto cluster uri cluster_uri: "https://CLUSTER.kusto.windows.net" - # Client Id + # Client ID application_id: "" # The client secret for the client application_key: "xx-xx-xx-xx" @@ -40,7 +40,7 @@ azuredataexplorer/2: azuredataexplorer/3: # Kusto cluster uri cluster_uri: "https://CLUSTER.kusto.windows.net" - # Client Id + # Client ID application_id: "f80da32c-108c-415c-a19e-643f461a677a" # The client secret for the client application_key: "xx-xx-xx-xx" @@ -119,7 +119,7 @@ azuredataexplorer/7: azuredataexplorer/8: # Kusto cluster uri cluster_uri: "https://CLUSTER.kusto.windows.net" - # Client Id + # Client ID application_id: "f80da32c-108c-415c-a19e-643f461a677a" # The client secret for the client application_key: "xx-xx-xx-xx" diff --git a/exporter/azuremonitorexporter/README.md b/exporter/azuremonitorexporter/README.md index 27c5bc1993bf..0b937a4c6788 100644 --- a/exporter/azuremonitorexporter/README.md +++ b/exporter/azuremonitorexporter/README.md @@ -103,7 +103,7 @@ The exporter follows the semantic conventions to fill the Application Insights s | Dependency.ResultCode | `http.status_code` or `status_code` | `"0"` | | Dependency.Success | `http.status_code` or `status_code` | `true` | -The exact mapping can be found [here](trace_to_envelope.go). +The exact mapping can be found in [trace_to_envelope.go](trace_to_envelope.go). All attributes are also mapped to custom properties if they are booleans or strings and to custom measurements if they are ints or doubles. diff --git a/exporter/carbonexporter/README.md b/exporter/carbonexporter/README.md index 8c37bebf218c..8e02f26b73be 100644 --- a/exporter/carbonexporter/README.md +++ b/exporter/carbonexporter/README.md @@ -39,8 +39,8 @@ exporters: timeout: 10s ``` -The full list of settings exposed for this receiver are documented [here](./config.go) -with detailed sample configurations [here](./testdata/config.yaml). +The full list of settings exposed for this receiver are documented in [config.go](./config.go) +with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). ## Advanced Configuration diff --git a/exporter/clickhouseexporter/README.md b/exporter/clickhouseexporter/README.md index aa8d8ea9808d..4beea98f17f4 100644 --- a/exporter/clickhouseexporter/README.md +++ b/exporter/clickhouseexporter/README.md @@ -36,7 +36,7 @@ as [ClickHouse document says:](https://clickhouse.com/docs/en/introduction/perfo #### Official ClickHouse Plugin for Grafana The official [ClickHouse Datasource for Grafana](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) contains features that integrate directly with this exporter. 
-You can view associated [logs](https://clickhouse.com/docs/en/integrations/grafana/query-builder#logs) and [traces](https://clickhouse.com/docs/en/integrations/grafana/query-builder#traces), as well as visualize other queries such as tables and time series graphs. Learn how to configure the OpenTelemetry integration [here](https://clickhouse.com/docs/en/integrations/grafana/config#opentelemetry). +You can view associated [logs](https://clickhouse.com/docs/en/integrations/grafana/query-builder#logs) and [traces](https://clickhouse.com/docs/en/integrations/grafana/query-builder#traces), as well as visualize other queries such as tables and time series graphs. Learn [how to configure the OpenTelemetry integration](https://clickhouse.com/docs/en/integrations/grafana/config#opentelemetry). #### Altinity's ClickHouse Plugin for Grafana diff --git a/exporter/coralogixexporter/config_test.go b/exporter/coralogixexporter/config_test.go index fe6bf96e79a5..d83c42cb59ee 100644 --- a/exporter/coralogixexporter/config_test.go +++ b/exporter/coralogixexporter/config_test.go @@ -252,13 +252,13 @@ func TestDomainWithAllExporters(t *testing.T) { assert.NoError(t, le.shutdown(context.Background())) } -func TestEndpoindsAndDomainWithAllExporters(t *testing.T) { +func TestEndpointsAndDomainWithAllExporters(t *testing.T) { cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) require.NoError(t, err) factory := NewFactory() cfg := factory.CreateDefaultConfig() - sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "domain_endoints").String()) + sub, err := cm.Sub(component.NewIDWithName(metadata.Type, "domain_endpoints").String()) require.NoError(t, err) require.NoError(t, sub.Unmarshal(cfg)) diff --git a/exporter/coralogixexporter/testdata/config.yaml b/exporter/coralogixexporter/testdata/config.yaml index ca33cbeb4d2b..162c5839fc7d 100644 --- a/exporter/coralogixexporter/testdata/config.yaml +++ b/exporter/coralogixexporter/testdata/config.yaml @@ -49,7 +49,7 @@ coralogix/domain: subsystem_name: "SUBSYSTEM_NAME" timeout: 5s -coralogix/domain_endoints: +coralogix/domain_endpoints: domain: "coralogix.com" traces: endpoint: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/exporter/datadogexporter/examples/collector.yaml b/exporter/datadogexporter/examples/collector.yaml index 711d8b22eda5..9b4bfa277d20 100644 --- a/exporter/datadogexporter/examples/collector.yaml +++ b/exporter/datadogexporter/examples/collector.yaml @@ -283,7 +283,7 @@ exporters: # # mode: distributions - ## Deprecated [v0.75.0]: use `send_aggreggations` instead. + ## Deprecated [v0.75.0]: use `send_aggregations` instead. ## @param send_count_sum_metrics - boolean - optional - default: false ## Whether to report sum, count, min and max as separate histogram metrics. 
 #
diff --git a/exporter/datadogexporter/internal/logs/sender.go b/exporter/datadogexporter/internal/logs/sender.go
index 7aa6d26cec4d..0b2a527e32e4 100644
--- a/exporter/datadogexporter/internal/logs/sender.go
+++ b/exporter/datadogexporter/internal/logs/sender.go
@@ -22,7 +22,7 @@ type Sender struct {
 }
 // logsV2 is the key in datadog ServerConfiguration
-// It is being used to customize the endpoint for datdog intake based on exporter configuration
+// It is being used to customize the endpoint for Datadog intake based on exporter configuration
 // https://github.com/DataDog/datadog-api-client-go/blob/be7e034424012c7ee559a2153802a45df73232ea/api/datadog/configuration.go#L308
 const logsV2 = "v2.LogsApi.SubmitLog"
diff --git a/exporter/datadogexporter/logs_exporter.go b/exporter/datadogexporter/logs_exporter.go
index 784db1572889..d8a8907c7105 100644
--- a/exporter/datadogexporter/logs_exporter.go
+++ b/exporter/datadogexporter/logs_exporter.go
@@ -98,7 +98,7 @@ func newLogsExporter(
 var _ consumer.ConsumeLogsFunc = (*logsExporter)(nil).consumeLogs
-// consumeLogs is implementation of cosumer.ConsumeLogsFunc
+// consumeLogs is an implementation of consumer.ConsumeLogsFunc
 func (exp *logsExporter) consumeLogs(ctx context.Context, ld plog.Logs) (err error) {
 	defer func() { err = exp.scrubber.Scrub(err) }()
 	if exp.cfg.HostMetadata.Enabled {
diff --git a/exporter/datadogexporter/metrics_exporter.go b/exporter/datadogexporter/metrics_exporter.go
index 89d8260eb3c1..b29891483a85 100644
--- a/exporter/datadogexporter/metrics_exporter.go
+++ b/exporter/datadogexporter/metrics_exporter.go
@@ -33,7 +33,7 @@ import (
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/scrub"
 )
-var metricRemappingDisableddFeatureGate = featuregate.GlobalRegistry().MustRegister(
+var metricRemappingDisabledFeatureGate = featuregate.GlobalRegistry().MustRegister(
 	"exporter.datadogexporter.metricremappingdisabled",
 	featuregate.StageAlpha,
 	featuregate.WithRegisterDescription("When enabled the Datadog Exporter remaps OpenTelemetry semantic conventions to Datadog semantic conventions.
This feature gate is only for internal use."), @@ -42,7 +42,7 @@ var metricRemappingDisableddFeatureGate = featuregate.GlobalRegistry().MustRegis // isMetricRemappingDisabled returns true if the datadogexporter should generate Datadog-compliant metrics from OpenTelemetry metrics func isMetricRemappingDisabled() bool { - return metricRemappingDisableddFeatureGate.IsEnabled() + return metricRemappingDisabledFeatureGate.IsEnabled() } type metricsExporter struct { diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 95935fd053a4..dcad0d25e19d 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -344,7 +344,7 @@ func Test_metricsExporter_PushMetricsData(t *testing.T) { }, } for _, tt := range tests { - t.Run(fmt.Sprintf("kind=%s,histgramMode=%s", tt.source.Kind, tt.histogramMode), func(t *testing.T) { + t.Run(fmt.Sprintf("kind=%s,histogramMode=%s", tt.source.Kind, tt.histogramMode), func(t *testing.T) { seriesRecorder := &testutil.HTTPRequestRecorder{Pattern: testutil.MetricV2Endpoint} sketchRecorder := &testutil.HTTPRequestRecorder{Pattern: testutil.SketchesMetricEndpoint} server := testutil.DatadogServerMock( @@ -788,7 +788,7 @@ func Test_metricsExporter_PushMetricsData_Zorkian(t *testing.T) { }, } for _, tt := range tests { - t.Run(fmt.Sprintf("kind=%s,histgramMode=%s", tt.source.Kind, tt.histogramMode), func(t *testing.T) { + t.Run(fmt.Sprintf("kind=%s,histogramMode=%s", tt.source.Kind, tt.histogramMode), func(t *testing.T) { seriesRecorder := &testutil.HTTPRequestRecorder{Pattern: testutil.MetricV1Endpoint} sketchRecorder := &testutil.HTTPRequestRecorder{Pattern: testutil.SketchesMetricEndpoint} server := testutil.DatadogServerMock( diff --git a/exporter/datadogexporter/traces_exporter.go b/exporter/datadogexporter/traces_exporter.go index a510c7e88ae4..b1f3e6cd5359 100644 --- a/exporter/datadogexporter/traces_exporter.go +++ b/exporter/datadogexporter/traces_exporter.go @@ -45,8 +45,8 @@ type traceExporter struct { params exporter.Settings cfg *Config ctx context.Context // ctx triggers shutdown upon cancellation - client *zorkian.Client // client sends runnimg metrics to backend & performs API validation - metricsAPI *datadogV2.MetricsApi // client sends runnimg metrics to backend + client *zorkian.Client // client sends running metrics to backend & performs API validation + metricsAPI *datadogV2.MetricsApi // client sends running metrics to backend scrubber scrub.Scrubber // scrubber scrubs sensitive information from error messages onceMetadata *sync.Once // onceMetadata ensures that metadata is sent only once across all exporters agent *agent.Agent // agent processes incoming traces diff --git a/exporter/datasetexporter/traces_exporter.go b/exporter/datasetexporter/traces_exporter.go index c1fde0ae0935..78674249dd9a 100644 --- a/exporter/datasetexporter/traces_exporter.go +++ b/exporter/datasetexporter/traces_exporter.go @@ -53,7 +53,7 @@ func buildEventFromSpan( } attrs["sca:schema"] = "tracing" - attrs["sca:schemVer"] = 1 + attrs["sca:schemaVer"] = 1 attrs["sca:type"] = "span" attrs["name"] = span.Name() diff --git a/exporter/datasetexporter/traces_exporter_test.go b/exporter/datasetexporter/traces_exporter_test.go index 0c905ce021cf..0eb7ec3fae6d 100644 --- a/exporter/datasetexporter/traces_exporter_test.go +++ b/exporter/datasetexporter/traces_exporter_test.go @@ -25,9 +25,9 @@ func generateTEvent1Raw() *add_events.Event { Ts: 
"1581452772000000321", ServerHost: "foo", Attrs: map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "operationA", "kind": "unspecified", @@ -54,9 +54,9 @@ func generateTEvent2Raw() *add_events.Event { Ts: "1581452772000000321", ServerHost: "foo", Attrs: map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "operationB", "kind": "unspecified", @@ -84,9 +84,9 @@ func generateTEvent3Raw() *add_events.Event { Ts: "1581452772000000321", ServerHost: "valServerHost", Attrs: map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "operationC", "kind": "unspecified", @@ -152,9 +152,9 @@ func TestBuildEventsFromSpanAttributesCollision(t *testing.T) { Sev: 9, Ts: "0", Attrs: map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "", "kind": "unspecified", @@ -208,9 +208,9 @@ func TestBuildEventsFromSpanAttributesDifferentTypes(t *testing.T) { Sev: 9, Ts: "0", Attrs: map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "", "kind": "unspecified", @@ -268,9 +268,9 @@ func TestBuildEventFromSpan(t *testing.T) { name: "Default", settings: newDefaultTracesSettings(), expected: add_events.EventAttrs{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "", "kind": "unspecified", @@ -304,9 +304,9 @@ func TestBuildEventFromSpan(t *testing.T) { }, }, expected: add_events.EventAttrs{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "", "kind": "unspecified", @@ -340,9 +340,9 @@ func TestBuildEventFromSpan(t *testing.T) { }, }, expected: add_events.EventAttrs{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "filled_nameA", "kind": "unspecified", @@ -608,9 +608,9 @@ func generateSimpleEvent( serverHost string, ) *add_events.Event { attrs := map[string]any{ - "sca:schemVer": 1, - "sca:schema": "tracing", - "sca:type": "span", + "sca:schemaVer": 1, + "sca:schema": "tracing", + "sca:type": "span", "name": "", "kind": "unspecified", diff --git a/exporter/dorisexporter/metrics_exponential_histogram.go b/exporter/dorisexporter/metrics_exponential_histogram.go index 1498546f3229..75591990b30c 100644 --- a/exporter/dorisexporter/metrics_exponential_histogram.go +++ b/exporter/dorisexporter/metrics_exponential_histogram.go @@ -57,11 +57,11 @@ func (m *metricModelExponentialHistogram) add(pm pmetric.Metric, dm *dMetric, e dp := dataPoints.At(i) exemplars := dp.Exemplars() - newExeplars := make([]*dExemplar, 0, exemplars.Len()) + newExemplars := make([]*dExemplar, 0, exemplars.Len()) for j := 0; j < exemplars.Len(); j++ { exemplar := exemplars.At(j) - newExeplar := &dExemplar{ + newExemplar := &dExemplar{ FilteredAttributes: exemplar.FilteredAttributes().AsRaw(), Timestamp: e.formatTime(exemplar.Timestamp().AsTime()), Value: e.getExemplarValue(exemplar), @@ -69,7 +69,7 @@ func (m 
*metricModelExponentialHistogram) add(pm pmetric.Metric, dm *dMetric, e TraceID: exemplar.TraceID().String(), } - newExeplars = append(newExeplars, newExeplar) + newExemplars = append(newExemplars, newExemplar) } positiveBucketCounts := dp.Positive().BucketCounts() @@ -97,7 +97,7 @@ func (m *metricModelExponentialHistogram) add(pm pmetric.Metric, dm *dMetric, e PositiveBucketCounts: newPositiveBucketCounts, NegativeOffset: dp.Negative().Offset(), NegativeBucketCounts: newNegativeBucketCounts, - Exemplars: newExeplars, + Exemplars: newExemplars, Min: dp.Min(), Max: dp.Max(), ZeroThreshold: dp.ZeroThreshold(), diff --git a/exporter/dorisexporter/metrics_gauge.go b/exporter/dorisexporter/metrics_gauge.go index a5cf1b9388dd..e8f3a7b733a2 100644 --- a/exporter/dorisexporter/metrics_gauge.go +++ b/exporter/dorisexporter/metrics_gauge.go @@ -46,11 +46,11 @@ func (m *metricModelGauge) add(pm pmetric.Metric, dm *dMetric, e *metricsExporte dp := dataPoints.At(i) exemplars := dp.Exemplars() - newExeplars := make([]*dExemplar, 0, exemplars.Len()) + newExemplars := make([]*dExemplar, 0, exemplars.Len()) for j := 0; j < exemplars.Len(); j++ { exemplar := exemplars.At(j) - newExeplar := &dExemplar{ + newExemplar := &dExemplar{ FilteredAttributes: exemplar.FilteredAttributes().AsRaw(), Timestamp: e.formatTime(exemplar.Timestamp().AsTime()), Value: e.getExemplarValue(exemplar), @@ -58,7 +58,7 @@ func (m *metricModelGauge) add(pm pmetric.Metric, dm *dMetric, e *metricsExporte TraceID: exemplar.TraceID().String(), } - newExeplars = append(newExeplars, newExeplar) + newExemplars = append(newExemplars, newExemplar) } metric := &dMetricGauge{ @@ -67,7 +67,7 @@ func (m *metricModelGauge) add(pm pmetric.Metric, dm *dMetric, e *metricsExporte Attributes: dp.Attributes().AsRaw(), StartTime: e.formatTime(dp.StartTimestamp().AsTime()), Value: e.getNumberDataPointValue(dp), - Exemplars: newExeplars, + Exemplars: newExemplars, } m.data = append(m.data, metric) } diff --git a/exporter/dorisexporter/metrics_histogram.go b/exporter/dorisexporter/metrics_histogram.go index 18d1b3f3afdb..b29418023776 100644 --- a/exporter/dorisexporter/metrics_histogram.go +++ b/exporter/dorisexporter/metrics_histogram.go @@ -52,11 +52,11 @@ func (m *metricModelHistogram) add(pm pmetric.Metric, dm *dMetric, e *metricsExp dp := dataPoints.At(i) exemplars := dp.Exemplars() - newExeplars := make([]*dExemplar, 0, exemplars.Len()) + newExemplars := make([]*dExemplar, 0, exemplars.Len()) for j := 0; j < exemplars.Len(); j++ { exemplar := exemplars.At(j) - newExeplar := &dExemplar{ + newExemplar := &dExemplar{ FilteredAttributes: exemplar.FilteredAttributes().AsRaw(), Timestamp: e.formatTime(exemplar.Timestamp().AsTime()), Value: e.getExemplarValue(exemplar), @@ -64,7 +64,7 @@ func (m *metricModelHistogram) add(pm pmetric.Metric, dm *dMetric, e *metricsExp TraceID: exemplar.TraceID().String(), } - newExeplars = append(newExeplars, newExeplar) + newExemplars = append(newExemplars, newExemplar) } bucketCounts := dp.BucketCounts() @@ -88,7 +88,7 @@ func (m *metricModelHistogram) add(pm pmetric.Metric, dm *dMetric, e *metricsExp Sum: dp.Sum(), BucketCounts: newBucketCounts, ExplicitBounds: newExplicitBounds, - Exemplars: newExeplars, + Exemplars: newExemplars, Min: dp.Min(), Max: dp.Max(), AggregationTemporality: pm.Histogram().AggregationTemporality().String(), diff --git a/exporter/dorisexporter/metrics_sum.go b/exporter/dorisexporter/metrics_sum.go index 56c66ba86419..118593dac011 100644 --- a/exporter/dorisexporter/metrics_sum.go +++ 
b/exporter/dorisexporter/metrics_sum.go @@ -48,11 +48,11 @@ func (m *metricModelSum) add(pm pmetric.Metric, dm *dMetric, e *metricsExporter) dp := dataPoints.At(i) exemplars := dp.Exemplars() - newExeplars := make([]*dExemplar, 0, exemplars.Len()) + newExemplars := make([]*dExemplar, 0, exemplars.Len()) for j := 0; j < exemplars.Len(); j++ { exemplar := exemplars.At(j) - newExeplar := &dExemplar{ + newExemplar := &dExemplar{ FilteredAttributes: exemplar.FilteredAttributes().AsRaw(), Timestamp: e.formatTime(exemplar.Timestamp().AsTime()), Value: e.getExemplarValue(exemplar), @@ -60,7 +60,7 @@ func (m *metricModelSum) add(pm pmetric.Metric, dm *dMetric, e *metricsExporter) TraceID: exemplar.TraceID().String(), } - newExeplars = append(newExeplars, newExeplar) + newExemplars = append(newExemplars, newExemplar) } metric := &dMetricSum{ @@ -69,7 +69,7 @@ func (m *metricModelSum) add(pm pmetric.Metric, dm *dMetric, e *metricsExporter) Attributes: dp.Attributes().AsRaw(), StartTime: e.formatTime(dp.StartTimestamp().AsTime()), Value: e.getNumberDataPointValue(dp), - Exemplars: newExeplars, + Exemplars: newExemplars, AggregationTemporality: pm.Sum().AggregationTemporality().String(), IsMonotonic: pm.Sum().IsMonotonic(), } diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go index 0f514e06aaaa..45afc7b5b6aa 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go @@ -5,8 +5,8 @@ // JSON documents. // // The JSON parsing in Elasticsearch does not support parsing JSON documents -// with duplicate fields. The fields in the docuemt can be sort and duplicate entries -// can be removed before serializing. Deduplication ensures that ambigious +// with duplicate fields. The fields in the document can be sort and duplicate entries +// can be removed before serializing. Deduplication ensures that ambiguous // events can still be indexed. 
// // With attributes map encoded as a list of key value diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index dddf46a14a01..24ec499fa2cb 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -27,7 +27,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel" ) -var expectedSpanBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attributes.service.instance.id":"23","Duration":1000000,"EndTimestamp":"2023-04-19T03:04:06.000000006Z","Events.fooEvent.evnetMockBar":"bar","Events.fooEvent.evnetMockFoo":"foo","Events.fooEvent.time":"2023-04-19T03:04:05.000000006Z","Kind":"SPAN_KIND_CLIENT","Link":"[{\"attribute\":{},\"spanID\":\"\",\"traceID\":\"01020304050607080807060504030200\"}]","Name":"client span","Resource.cloud.platform":"aws_elastic_beanstalk","Resource.cloud.provider":"aws","Resource.deployment.environment":"BETA","Resource.service.instance.id":"23","Resource.service.name":"some-service","Resource.service.version":"env-version-1234","Scope.lib-foo":"lib-bar","Scope.name":"io.opentelemetry.rabbitmq-2.7","Scope.version":"1.30.0-alpha","SpanId":"1920212223242526","TraceId":"01020304050607080807060504030201","TraceStatus":2,"TraceStatusDescription":"Test"}` +var expectedSpanBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attributes.service.instance.id":"23","Duration":1000000,"EndTimestamp":"2023-04-19T03:04:06.000000006Z","Events.fooEvent.eventMockBar":"bar","Events.fooEvent.eventMockFoo":"foo","Events.fooEvent.time":"2023-04-19T03:04:05.000000006Z","Kind":"SPAN_KIND_CLIENT","Link":"[{\"attribute\":{},\"spanID\":\"\",\"traceID\":\"01020304050607080807060504030200\"}]","Name":"client span","Resource.cloud.platform":"aws_elastic_beanstalk","Resource.cloud.provider":"aws","Resource.deployment.environment":"BETA","Resource.service.instance.id":"23","Resource.service.name":"some-service","Resource.service.version":"env-version-1234","Scope.lib-foo":"lib-bar","Scope.name":"io.opentelemetry.rabbitmq-2.7","Scope.version":"1.30.0-alpha","SpanId":"1920212223242526","TraceId":"01020304050607080807060504030201","TraceStatus":2,"TraceStatusDescription":"Test"}` var expectedLogBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attributes.log-attr1":"value1","Body":"log-body","Resource.key1":"value1","Scope.name":"","Scope.version":"","SeverityNumber":0,"TraceFlags":0}` @@ -181,8 +181,8 @@ func mockResourceSpans() ptrace.Traces { event := span.Events().AppendEmpty() event.SetName("fooEvent") event.SetTimestamp(pcommon.NewTimestampFromTime(tStart)) - event.Attributes().PutStr("evnetMockFoo", "foo") - event.Attributes().PutStr("evnetMockBar", "bar") + event.Attributes().PutStr("eventMockFoo", "foo") + event.Attributes().PutStr("eventMockBar", "bar") return traces } diff --git a/exporter/fileexporter/config.go b/exporter/fileexporter/config.go index 43499f70e105..d4bcd9b2c36c 100644 --- a/exporter/fileexporter/config.go +++ b/exporter/fileexporter/config.go @@ -119,7 +119,7 @@ func (cfg *Config) Validate() error { if cfg.GroupBy != nil && cfg.GroupBy.Enabled { pathParts := strings.Split(cfg.Path, "*") if len(pathParts) != 2 { - return errors.New("path must contain exatcly one * when group_by is enabled") + return errors.New("path must contain exactly one * when group_by is enabled") } if len(pathParts[0]) == 0 { diff --git a/exporter/fileexporter/config_test.go b/exporter/fileexporter/config_test.go index 
675e8b7b3e9f..decd722763c4 100644 --- a/exporter/fileexporter/config_test.go +++ b/exporter/fileexporter/config_test.go @@ -175,7 +175,7 @@ func TestLoadConfig(t *testing.T) { }, { id: component.NewIDWithName(metadata.Type, "group_by_invalid_path"), - errorMessage: "path must contain exatcly one * when group_by is enabled", + errorMessage: "path must contain exactly one * when group_by is enabled", }, { id: component.NewIDWithName(metadata.Type, "group_by_invalid_path2"), diff --git a/exporter/fileexporter/encoding_test.go b/exporter/fileexporter/encoding_test.go index afc62a0a3784..9410d4b029be 100644 --- a/exporter/fileexporter/encoding_test.go +++ b/exporter/fileexporter/encoding_test.go @@ -94,14 +94,14 @@ func generateLogs() plog.Logs { } func generateProfiles() pprofile.Profiles { - proflies := pprofile.NewProfiles() - rp := proflies.ResourceProfiles().AppendEmpty() + profiles := pprofile.NewProfiles() + rp := profiles.ResourceProfiles().AppendEmpty() rp.Resource().Attributes().PutStr("resource", "R1") p := rp.ScopeProfiles().AppendEmpty().Profiles().AppendEmpty() p.SetProfileID(pprofile.NewProfileIDEmpty()) p.SetStartTime(pcommon.NewTimestampFromTime(time.Now().Add(-1 * time.Second))) p.SetDuration(pcommon.Timestamp(1 * time.Second / time.Nanosecond)) - return proflies + return profiles } func generateMetrics() pmetric.Metrics { diff --git a/exporter/fileexporter/marshaller.go b/exporter/fileexporter/marshaller.go index 166d42c4ce0d..69bc78f1ee7a 100644 --- a/exporter/fileexporter/marshaller.go +++ b/exporter/fileexporter/marshaller.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -// Marshaler configuration used for marhsaling Protobuf +// Marshaler configuration used for marshaling Protobuf var tracesMarshalers = map[string]ptrace.Marshaler{ formatTypeJSON: &ptrace.JSONMarshaler{}, formatTypeProto: &ptrace.ProtoMarshaler{}, diff --git a/exporter/googlecloudexporter/README.md b/exporter/googlecloudexporter/README.md index 8a7754f45b8a..8b99770dc83d 100644 --- a/exporter/googlecloudexporter/README.md +++ b/exporter/googlecloudexporter/README.md @@ -177,7 +177,7 @@ The following configuration options are supported: - `impersonate` (optional): Configuration for service account impersonation - `target_principal`: TargetPrincipal is the email address of the service account to impersonate. - `subject`: (optional) Subject is the sub field of a JWT. This field should only be set if you wish to impersonate as a user. This feature is useful when using domain wide delegation. - - `delegates`: (default = []) Delegates are the service account email addresses in a delegation chain. Each service account must be granted roles/iam.serviceAccountTokenCreatoron the next service account in the chain. + - `delegates`: (default = []) Delegates are the service account email addresses in a delegation chain. Each service account must be granted roles/iam.serviceAccountTokenCreator on the next service account in the chain. - `metric` (optional): Configuration for sending metrics to Cloud Monitoring. - `prefix` (default = `workload.googleapis.com`): The prefix to add to metrics. - `endpoint` (default = monitoring.googleapis.com): Endpoint where metric data is going to be sent to. @@ -193,7 +193,7 @@ The following configuration options are supported: - `resource_filters` (default = []): If provided, resource attributes matching any filter will be included in metric labels. Can be defined by `prefix`, `regex`, or `prefix` AND `regex`. - `prefix`: Match resource keys by prefix. 
- `regex`: Match resource keys by regex. - - `cumulative_normalization` (default = true): If true, normalizes cumulative metrics without start times or with explicit reset points by subtracting subsequent points from the initial point. It is enabled by default. Since it caches starting points, it may result inincreased memory usage. + - `cumulative_normalization` (default = true): If true, normalizes cumulative metrics without start times or with explicit reset points by subtracting subsequent points from the initial point. It is enabled by default. Since it caches starting points, it may result in increased memory usage. - `sum_of_squared_deviation` (default = false): If true, enables calculation of an estimated sum of squared deviation. It is an estimate, and is not exact. - `experimental_wal` (default = []): If provided, enables use of a write ahead log for time series requests. @@ -216,7 +216,7 @@ The following configuration options are supported: - `resource_filters` (default = []): If provided, resource attributes matching any filter will be included in log labels. Can be defined by `prefix`, `regex`, or `prefix` AND `regex`. - `prefix`: Match resource keys by prefix. - `regex`: Match resource keys by regex. - - `compression` (optional): Enable gzip compression for gRPC requests (valid vlaues: `gzip`). + - `compression` (optional): Enable gzip compression for gRPC requests (valid values: `gzip`). - `sending_queue` (optional): Configuration for how to buffer traces before sending. - `enabled` (default = true) - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false` @@ -244,7 +244,7 @@ For metrics and logs, this exporter maps the OpenTelemetry Resource to a Google Cloud [Logging](https://cloud.google.com/logging/docs/api/v2/resource-list) or [Monitoring](https://cloud.google.com/monitoring/api/resources) Monitored Resource. -The complete mapping logic can be found [here](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/internal/resourcemapping/resourcemapping.go). +The complete mapping logic can be found in [resourcemapping.go](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/internal/resourcemapping/resourcemapping.go). That may be the most helpful reference if you want to map to a specific monitored resource. diff --git a/exporter/googlecloudpubsubexporter/watermark_test.go b/exporter/googlecloudpubsubexporter/watermark_test.go index f01ad2814b61..82b838b408c0 100644 --- a/exporter/googlecloudpubsubexporter/watermark_test.go +++ b/exporter/googlecloudpubsubexporter/watermark_test.go @@ -95,7 +95,7 @@ func TestEarliestLogsWatermarkOutDrift(t *testing.T) { assert.Equal(t, tsBefore1m, out) } -func TestEarliestTracessWatermarkInDrift(t *testing.T) { +func TestEarliestTracesWatermarkInDrift(t *testing.T) { out := earliestTracesWatermark(tracesData, tsRef, time.Hour) assert.Equal(t, tsBefore5m, out) } diff --git a/exporter/googlemanagedprometheusexporter/README.md b/exporter/googlemanagedprometheusexporter/README.md index edac0117be2e..b98a34b865fa 100644 --- a/exporter/googlemanagedprometheusexporter/README.md +++ b/exporter/googlemanagedprometheusexporter/README.md @@ -125,7 +125,7 @@ The Google Managed Prometheus exporter maps metrics to the [prometheus_target](https://cloud.google.com/monitoring/api/resources#tag_prometheus_target) monitored resource. 
The logic for mapping to monitored resources is designed to be used with the prometheus receiver, but can be used with other receivers as -well. To avoid collisions (i.e. "duplicate timeseries enountered" errors), you +well. To avoid collisions (i.e. "duplicate timeseries encountered" errors), you need to ensure the prometheus_target resource uniquely identifies the source of metrics. The exporter uses the following resource attributes to determine monitored resource: @@ -202,13 +202,13 @@ processors: Error: `Value type for metric conflicts with the existing value type` -Google Managed Service for Promethueus (and Google Cloud Monitoring) have fixed +Google Managed Service for Prometheus (and Google Cloud Monitoring) have fixed value types (INT and DOUBLE) for metrics. Once a metric has been written as an INT or DOUBLE, attempting to write the other type will fail with the error above. This commonly occurs when a metric's value type has changed, or when a mix of INT and DOUBLE for the same metric are being written to the same project. The recommended way to fix this is to convert all metrics to DOUBLE to -prevent collisions using the `exporter.googlemanagedpromethues.intToDouble` +prevent collisions using the `exporter.googlemanagedprometheus.intToDouble` feature gate, documented above. Once you enable the feature gate, you will likely see new errors indicating @@ -224,7 +224,7 @@ written as a double going forward. The simplest way to do this is by using the Error: `One or more points were written more frequently than the maximum sampling period configured for the metric.` -Google Managed Service for Promethueus (and Google Cloud Monitoring) +Google Managed Service for Prometheus (and Google Cloud Monitoring) [limit](https://cloud.google.com/monitoring/quotas#custom_metrics_quotas) the rate at which points can be written to one point every 5 seconds. If you try to write points more frequently, you will encounter the error above. If you know @@ -281,7 +281,7 @@ by applications in a way that uniquely identifies each instance. The next most common reason is (2), which means that the exporter's mapping logic from OpenTelemetry resource to Google Cloud's `prometheus_target` -monitored resouce didn't preserve a resource attribute that was needed to +monitored resource didn't preserve a resource attribute that was needed to distinguish timeseries. This can be mitigated by adding resource attributes as metric labels using `resource_filters` configuration in the exporter. The following example adds common identifying resource attributes. @@ -317,5 +317,5 @@ exporters: ``` That can help identify which metric sources are colliding, so you know which -applications or metrics need additional attributes to ditinguish them from +applications or metrics need additional attributes to distinguish them from one-another. 
diff --git a/exporter/kafkaexporter/kafka_exporter_test.go b/exporter/kafkaexporter/kafka_exporter_test.go index 80b56d05dbc8..a522ee585388 100644 --- a/exporter/kafkaexporter/kafka_exporter_test.go +++ b/exporter/kafkaexporter/kafka_exporter_test.go @@ -116,7 +116,7 @@ func TestNewExporter_err_auth_type(t *testing.T) { Authentication: kafka.Authentication{ TLS: &configtls.ClientConfig{ Config: configtls.Config{ - CAFile: "/doesnotexist", + CAFile: "/nonexistent", }, }, }, diff --git a/exporter/kafkaexporter/marshaler_test.go b/exporter/kafkaexporter/marshaler_test.go index d68c6332768b..95be677c7def 100644 --- a/exporter/kafkaexporter/marshaler_test.go +++ b/exporter/kafkaexporter/marshaler_test.go @@ -109,7 +109,7 @@ func TestOTLPMetricsJsonMarshaling(t *testing.T) { m := sm.Metrics().AppendEmpty() m.SetEmptyGauge() m.Gauge().DataPoints().AppendEmpty().SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(1, 0))) - m.Gauge().DataPoints().At(0).Attributes().PutStr("gauage_attribute", "attr") + m.Gauge().DataPoints().At(0).Attributes().PutStr("gauge_attribute", "attr") m.Gauge().DataPoints().At(0).SetDoubleValue(1.0) r1 := pcommon.NewResource() diff --git a/exporter/kineticaexporter/README.md b/exporter/kineticaexporter/README.md index 7c6e619f3eb3..d12288a90181 100644 --- a/exporter/kineticaexporter/README.md +++ b/exporter/kineticaexporter/README.md @@ -688,9 +688,9 @@ CREATE TABLE "otel"."metric_summary_scope_attribute" ``` -# Kinetica Opentelemetry Exporter +# Kinetica OpenTelemetry Exporter - This exporter could be used to as part of an `Opentelemetry` collector to persist data related to + This exporter can be used as part of an `OpenTelemetry` collector to persist data related to `logs`, `traces` and `metrics` to the `Kinetica` database. This component is under `development` status.
diff --git a/exporter/kineticaexporter/exporter_metric_test.go b/exporter/kineticaexporter/exporter_metric_test.go index cca7713a38a3..7f048c321056 100644 --- a/exporter/kineticaexporter/exporter_metric_test.go +++ b/exporter/kineticaexporter/exporter_metric_test.go @@ -424,7 +424,7 @@ func handleInsertRecords(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) _, err := w.Write([]byte("\x04OK\x00.insert_records_response\x08\x00\x06\x00\x00\x00")) if err != nil { - http.Error(w, "Error wrting reesponse", http.StatusInternalServerError) + http.Error(w, "Error writing response", http.StatusInternalServerError) return } } @@ -439,7 +439,7 @@ func handleExecuteSQL(w http.ResponseWriter, r *http.Request) { responseBytes := []byte("\x04OK\x00(execute_sql_response\xd4\x05\x02\xf6\x03{\"type\":\"record\",\"name\":\"generic_response\",\"fields\":[{\"name\":\"column_1\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"column_headers\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"column_datatypes\",\"type\":{\"type\":\"array\",\"items\":\"string\"}}]}$\x00\x02\ndummy\x00\x02\fstring\x00\x00\x01\x00\x00\b X-Kinetica-Group\x06DDL\ncount\x020\x1alast_endpoint\x1a/create/table.total_number_of_records\x020\x00\x00") _, err := w.Write(responseBytes) if err != nil { - http.Error(w, "Error wrting reesponse", http.StatusInternalServerError) + http.Error(w, "Error writing response", http.StatusInternalServerError) return } } @@ -467,7 +467,7 @@ func handleShowTable(w http.ResponseWriter, r *http.Request) { _, err = w.Write(finalResponseBytes) if err != nil { - http.Error(w, "Error wrting reesponse", http.StatusInternalServerError) + http.Error(w, "Error writing response", http.StatusInternalServerError) return } } diff --git a/exporter/kineticaexporter/metrics_exporter.go b/exporter/kineticaexporter/metrics_exporter.go index f8036b35ac92..17dd7cc98b2d 100644 --- a/exporter/kineticaexporter/metrics_exporter.go +++ b/exporter/kineticaexporter/metrics_exporter.go @@ -40,15 +40,15 @@ type kineticaSumRecord struct { } type kineticaHistogramRecord struct { - histogram *Histogram - histogramResourceAttribute []HistogramResourceAttribute - histogramScopeAttribute []HistogramScopeAttribute - histogramDatapoint []HistogramDatapoint - histogramDatapointAtribute []HistogramDataPointAttribute - histogramBucketCount []HistogramDatapointBucketCount - histogramExplicitBound []HistogramDatapointExplicitBound - exemplars []HistogramDatapointExemplar - exemplarAttribute []HistogramDataPointExemplarAttribute + histogram *Histogram + histogramResourceAttribute []HistogramResourceAttribute + histogramScopeAttribute []HistogramScopeAttribute + histogramDatapoint []HistogramDatapoint + histogramDatapointAttribute []HistogramDataPointAttribute + histogramBucketCount []HistogramDatapointBucketCount + histogramExplicitBound []HistogramDatapointExplicitBound + exemplars []HistogramDatapointExemplar + exemplarAttribute []HistogramDataPointExemplarAttribute } type kineticaExponentialHistogramRecord struct { @@ -376,7 +376,7 @@ func (e *kineticaMetricsExporter) pushMetricsData(_ context.Context, md pmetric. 
// createSummaryRecord - creates a Summary type record // -// @receiver e - Method aplicable to [kineticaMetricsExporter] +// @receiver e - Method applicable to [kineticaMetricsExporter] // @param resAttr - a map of key to value of resource attributes // @param _ schemaURL - unused // @param scopeInstr - the instrumentation scope @@ -815,7 +815,7 @@ func (e *kineticaMetricsExporter) createHistogramRecord(resAttr pcommon.Map, _ s datapointAttribute = append(datapointAttribute, *sa) } } - kiHistogramRecord.histogramDatapointAtribute = append(kiHistogramRecord.histogramDatapointAtribute, datapointAttribute...) + kiHistogramRecord.histogramDatapointAttribute = append(kiHistogramRecord.histogramDatapointAttribute, datapointAttribute...) for k := range datapointAttributes { delete(datapointAttributes, k) diff --git a/exporter/kineticaexporter/writer.go b/exporter/kineticaexporter/writer.go index 6cfb2655ddba..cb5811971b88 100644 --- a/exporter/kineticaexporter/writer.go +++ b/exporter/kineticaexporter/writer.go @@ -632,7 +632,7 @@ func (kiwriter *KiWriter) persistHistogramRecord(histogramRecords []kineticaHist datapoints = append(datapoints, dp) } - for _, dpattr := range histogramrecord.histogramDatapointAtribute { + for _, dpattr := range histogramrecord.histogramDatapointAttribute { datapointAttributes = append(datapointAttributes, dpattr) } diff --git a/exporter/loadbalancingexporter/README.md b/exporter/loadbalancingexporter/README.md index a6d5e8606d8e..667b21c833af 100644 --- a/exporter/loadbalancingexporter/README.md +++ b/exporter/loadbalancingexporter/README.md @@ -48,7 +48,7 @@ This also supports service name based exporting for traces. If you have two or m ## Resilience and scaling considerations -The `loadbalancingexporter` will, irrespective of the chosen resolver (`static`, `dns`, `k8s`), create one `otlp` exporter per endpoint. Each level of exporters, `loadbalancingexporter` itself and all sub-exporters (one per each endpoint), have it's own queue, timeout and retry mechanisms. Importantly, the `loadbalancingexporter`, by default, will NOT attempt to re-route data to a healthy endpoint on delivery failure, because in-memory queue, retry and timeout setting are disabled by default ([more details on queuing, retry and timeout default settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)). +The `loadbalancingexporter` will, irrespective of the chosen resolver (`static`, `dns`, `k8s`), create one `otlp` exporter per endpoint. Each level of exporters, `loadbalancingexporter` itself and all sub-exporters (one per each endpoint), have its own queue, timeout and retry mechanisms. Importantly, the `loadbalancingexporter`, by default, will NOT attempt to re-route data to a healthy endpoint on delivery failure, because in-memory queue, retry and timeout setting are disabled by default ([more details on queuing, retry and timeout default settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)). 
``` +------------------+ +---------------+ diff --git a/exporter/loadbalancingexporter/consistent_hashing.go b/exporter/loadbalancingexporter/consistent_hashing.go index eda24bb2927e..faf71d1f515e 100644 --- a/exporter/loadbalancingexporter/consistent_hashing.go +++ b/exporter/loadbalancingexporter/consistent_hashing.go @@ -10,7 +10,7 @@ import ( const ( maxPositions uint32 = 36000 // 360 degrees with two decimal places - defaultWeight int = 100 // the number of points in the ring for each entry. For better results, it should be higher than 100. + defaultWeight int = 100 // the number of points in the ring for each entry. For better results, it should be greater than 100. ) // position represents a specific angle in the ring. @@ -79,7 +79,7 @@ func bsearch(pos position, left []ringItem, right []ringItem) ringItem { return left[0] } - // if the requested position is higher than the highest in the left, the item is in the right side + // if the requested position is greater than the highest in the left, the item is in the right side if pos > left[len(left)-1].pos { size := len(right) if size == 1 { diff --git a/exporter/loadbalancingexporter/example/README.md b/exporter/loadbalancingexporter/example/README.md index 6c5ef4710e2b..82e289d4f9aa 100644 --- a/exporter/loadbalancingexporter/example/README.md +++ b/exporter/loadbalancingexporter/example/README.md @@ -9,7 +9,7 @@ Supported pipeline types: logs docker build -t otelcontribcol . ``` -2. Then from this directory (exporter/loadbalacingexporter/example), run: +2. Then from this directory (exporter/loadbalancingexporter/example), run: ```shell docker-compose up ``` diff --git a/exporter/loadbalancingexporter/example/docker-compose.yaml b/exporter/loadbalancingexporter/example/docker-compose.yaml index 942d9506e856..a9e58e00785b 100644 --- a/exporter/loadbalancingexporter/example/docker-compose.yaml +++ b/exporter/loadbalancingexporter/example/docker-compose.yaml @@ -40,7 +40,7 @@ services: - "4317" # OTLP gRPC receiver - "55679" # zpages extension - # Otel agent (running loadbalacing exporter) + # Otel agent (running loadbalancing exporter) otel-agent: image: otelcontribcol:latest command: ["--config=/etc/otel-agent-config.yaml"] diff --git a/exporter/logzioexporter/README.md b/exporter/logzioexporter/README.md index c614d8c17b13..50146486ab47 100644 --- a/exporter/logzioexporter/README.md +++ b/exporter/logzioexporter/README.md @@ -113,7 +113,7 @@ service: level: "debug" ``` #### Metrics: -In order to use the Prometheus backend you must use the standard prometheusremotewrite exporter as well. The following [regions](https://docs.logz.io/user-guide/accounts/account-region.html#supported-regions-for-prometheus-metrics) are supported and configured as follows. The Logz.io Listener URL for for your region, configured to use port 8052 for http traffic, or port 8053 for https traffic. +In order to use the Prometheus backend you must use the standard prometheusremotewrite exporter as well. The following [regions](https://docs.logz.io/user-guide/accounts/account-region.html#supported-regions-for-prometheus-metrics) are supported and configured as follows. The Logz.io Listener URL for your region, configured to use port 8052 for http traffic, or port 8053 for https traffic. 
Example: ```yaml exporters: diff --git a/exporter/logzioexporter/factory_test.go b/exporter/logzioexporter/factory_test.go index 343e9b4565f7..14492b3a2613 100644 --- a/exporter/logzioexporter/factory_test.go +++ b/exporter/logzioexporter/factory_test.go @@ -50,9 +50,9 @@ func TestGenerateUrl(t *testing.T) { generateURLTests := []generateURLTest{ {"", "us", "https://listener.logz.io:8071/?token=token"}, {"", "", "https://listener.logz.io:8071/?token=token"}, - {"https://doesnotexist.com", "", "https://doesnotexist.com"}, - {"https://doesnotexist.com", "us", "https://doesnotexist.com"}, - {"https://doesnotexist.com", "not-valid", "https://doesnotexist.com"}, + {"https://nonexistent.com", "", "https://nonexistent.com"}, + {"https://nonexistent.com", "us", "https://nonexistent.com"}, + {"https://nonexistent.com", "not-valid", "https://nonexistent.com"}, {"", "not-valid", "https://listener.logz.io:8071/?token=token"}, {"", "US", "https://listener.logz.io:8071/?token=token"}, {"", "Us", "https://listener.logz.io:8071/?token=token"}, diff --git a/exporter/lokiexporter/README.md b/exporter/lokiexporter/README.md index 8929d4f32582..02e79fc203de 100644 --- a/exporter/lokiexporter/README.md +++ b/exporter/lokiexporter/README.md @@ -27,7 +27,7 @@ The new format for OpenTelemetry logs introduced in Loki V3 brings the following ### Loki log message format changes for OpenTelemetry logs -See OpenTelemetry Logs Data Model specification [here](https://opentelemetry.io/docs/specs/otel/logs/data-model/). +See [OpenTelemetry Logs Data Model specification](https://opentelemetry.io/docs/specs/otel/logs/data-model/). | OpenTelemetry log field | Pre Loki V3 | Loki V3 through the Loki OTLP Endpoint | | ----- | ----- | ----- | @@ -41,7 +41,7 @@ See OpenTelemetry Logs Data Model specification [here](https://opentelemetry.io/ | [`Body`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body) | `body` field of the Loki JSON log message | The Loki log message. `__line__`in LogQL functions (e.g. `line_format`)| | [`InstrumentationScope`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-instrumentationscope) | `instrumentation_scope_name` field of the JSON log message | `metadata[scope_name]` | | [`Attributes`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes) | JSON fields of the Loki log message | `metadata[xyz]` Where `xyz` is the `_` version of the OTel attribute name (e.g. `thread_name` Loki metadata for the `thread.name` OpenTelemetry attribute)| -| [`Resource`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-resource) | `service.name`, `service.namespace`, and `service.instance.id` are promoted as the following labels: `job=[${service.namespace}/]${service.name}`, instance=${service.instance.id}, exporter="OTLP"`. Other resource attributes are stored as JSON fields of the Loki log message with the prefix `resources_` (e.g. `resources_k8s_namespace_name`) | Default list of resource attributes promoted as Loki labels: `cloud.availability_zone`, `cloud.region`, `container.name`, `deployment.environment`, `k8s.cluster.name`, `k8s.container.name`, `k8s.cronjob.name`, `k8s.daemonset.name`, `k8s.deployment.name`, `k8s.job.name`, `k8s.namespace.name`, `k8s.pod.name`, `k8s.replicaset.name` `k8s.statefulset.name`, `service.instance.id`, `service.name`, `service.namespace`.
Other resource attributes are by default promoted as Loki message metadata.
ℹ️ The list of promoted resource attributes is configurable using Loki’s distributor config parameter `default_resource_attributes_as_index_labels` when using self managed Loki ([here](https://grafana.com/docs/loki/latest/configure/\#distributor)) or opening a support request when using Grafana Cloud | +| [`Resource`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-resource) | `service.name`, `service.namespace`, and `service.instance.id` are promoted as the following labels: `job=[${service.namespace}/]${service.name}`, instance=${service.instance.id}, exporter="OTLP"`. Other resource attributes are stored as JSON fields of the Loki log message with the prefix `resources_` (e.g. `resources_k8s_namespace_name`) | Default list of resource attributes promoted as Loki labels: `cloud.availability_zone`, `cloud.region`, `container.name`, `deployment.environment`, `k8s.cluster.name`, `k8s.container.name`, `k8s.cronjob.name`, `k8s.daemonset.name`, `k8s.deployment.name`, `k8s.job.name`, `k8s.namespace.name`, `k8s.pod.name`, `k8s.replicaset.name` `k8s.statefulset.name`, `service.instance.id`, `service.name`, `service.namespace`.
Other resource attributes are by default promoted as Loki message metadata.
ℹ️ The list of promoted resource attributes is configurable using Loki’s [distributor config parameter](https://grafana.com/docs/loki/latest/configure/\#distributor) `default_resource_attributes_as_index_labels` when using self managed Loki or opening a support request when using Grafana Cloud | ℹ️ Additional conversion rules from OpenTelemetry Logs to Loki @@ -115,7 +115,7 @@ service: * When using Grafana Cloud, the [Grafana Cloud OTLP endpoint](https://grafana.com/docs/grafana-cloud/send-data/otlp/send-data-otlp/) should be used instead of the Loki OTLP endpoint. The connection details of the Grafana Cloud OTLP endpoint, OTLP HTTP URL and credentials are available using the Grafana Cloud "OpenTelemetry Collector" connection tile. * The promotion of OpenTelemetry attributes and resource attributes to Loki labels using the `loki.attribute.labels` and `loki.resource.labels` hints is replaced by the list of promoted attributes managed centrally in Loki. * The default list of resource attributes promoted as labels (see above) should be sufficient for most use cases. -* ℹ️ Changes can be made to this list using the Loki distributor configuration parameter `default_resource_attributes_as_index_labels` ([here](https://grafana.com/docs/loki/latest/configure/\#distributor)) for self managed instances and opening a support ticket for Grafana Cloud. +* ℹ️ Changes can be made to this list using the Loki [distributor config parameter](https://grafana.com/docs/loki/latest/configure/\#distributor) `default_resource_attributes_as_index_labels` for self managed instances and opening a support ticket for Grafana Cloud. #### LogQL queries migration @@ -209,7 +209,7 @@ Configuration screenshot: To enable the "logs to trace" navigation from Loki to Tempo, navigate to the Grafana Loki data source configuration screen, in the "Derived fields" section, update or create a derived field with: * Name: `Trace ID` -* Type: `Label` (note that this `Label` name may be missleading because it also supports Loki message metadata) +* Type: `Label` (note that this `Label` name may be misleading because it also supports Loki message metadata) * Label: `trace_id` * Internal link: activated * Select the Tempo data source on which "trace to logs" is configured as described above @@ -271,8 +271,7 @@ processors: ``` Currently, Loki does not support label names with dots. -That's why lokiexporter normalizes label names to follow Prometheus label names standard before sending requests to Loki. -More information on label normalization could be found [here](../../pkg/translator/prometheus/README.md#Labels) +That's why lokiexporter normalizes label names to follow [Prometheus label names standard](../../pkg/translator/prometheus/README.md#Labels) before sending requests to Loki. The promotion of multiple resource and log attributes to labels is done with single action with comma-separated desired labels: ```yaml @@ -306,10 +305,10 @@ If `service.instance.id` is present then `instance=service.instance.id` is set If `service.instance.id` is not present then `instance` label is not set -The full list of settings exposed for this exporter are documented [here](./config.go) with detailed sample -configurations [here](./testdata/config.yaml). +The full list of settings exposed for this exporter are documented in [config.go](./config.go) with detailed sample +configurations in [testdata/config.yaml](./testdata/config.yaml). 
-More information on how to send logs to Grafana Loki using the OpenTelemetry Collector could be found [here](https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/) +For more information, see [how to send logs to Grafana Loki using the OpenTelemetry Collector](https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/) ### Tenant information diff --git a/exporter/opencensusexporter/README.md b/exporter/opencensusexporter/README.md index e60ce46c85a9..a9d3eca83ee3 100644 --- a/exporter/opencensusexporter/README.md +++ b/exporter/opencensusexporter/README.md @@ -21,8 +21,8 @@ Exports traces and/or metrics via gRPC using The following settings are required: - `endpoint` (no default): host:port to which the exporter is going to send Jaeger trace data, -using the gRPC protocol. The valid syntax is described -[here](https://github.com/grpc/grpc/blob/master/doc/naming.md) +using the gRPC protocol. The valid syntax is described in +[grpc's naming.md](https://github.com/grpc/grpc/blob/master/doc/naming.md). By default, TLS is enabled and must be configured under `tls:`: diff --git a/exporter/opensearchexporter/config.go b/exporter/opensearchexporter/config.go index b3108fa2a747..9f274be98ef6 100644 --- a/exporter/opensearchexporter/config.go +++ b/exporter/opensearchexporter/config.go @@ -83,7 +83,7 @@ type MappingsSettings struct { // Field to store timestamp in. If not set uses the default @timestamp TimestampField string `mapstructure:"timestamp_field"` - // Whether to store timestamp in Epoch miliseconds + // Whether to store timestamp in Epoch milliseconds UnixTimestamp bool `mapstructure:"unix_timestamp"` // Try to find and remove duplicate fields diff --git a/exporter/opensearchexporter/internal/objmodel/objmodel.go b/exporter/opensearchexporter/internal/objmodel/objmodel.go index de7d93da9b04..40d944d9f882 100644 --- a/exporter/opensearchexporter/internal/objmodel/objmodel.go +++ b/exporter/opensearchexporter/internal/objmodel/objmodel.go @@ -5,8 +5,8 @@ // JSON documents. // // The JSON parsing in OpenSearch does not support parsing JSON documents -// with duplicate fields. The fields in the docuemt can be sort and duplicate entries -// can be removed before serializing. Deduplication ensures that ambigious +// with duplicate fields. The fields in the document can be sort and duplicate entries +// can be removed before serializing. Deduplication ensures that ambiguous // events can still be indexed. // // With attributes map encoded as a list of key value diff --git a/exporter/otelarrowexporter/README.md b/exporter/otelarrowexporter/README.md index 2be2a1c4b069..8017d73c3090 100644 --- a/exporter/otelarrowexporter/README.md +++ b/exporter/otelarrowexporter/README.md @@ -54,8 +54,8 @@ setting is required. The `tls` setting is required for insecure transport. - `endpoint` (no default): host:port to which the exporter is going to send OTLP trace data, -using the gRPC protocol. The valid syntax is described -[here](https://github.com/grpc/grpc/blob/master/doc/naming.md). +using the gRPC protocol. The valid syntax is described in +[grpc's naming.md](https://github.com/grpc/grpc/blob/master/doc/naming.md). If a scheme of `https` is used then client transport security is enabled and overrides the `insecure` setting. - `tls`: see [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) for the full set of available options. 
@@ -187,7 +187,7 @@ receivers: ### Exporter metrics -In addition to the the standard +In addition to the standard [exporterhelper](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) and [obsreport](https://pkg.go.dev/go.opentelemetry.io/collector/obsreport) diff --git a/exporter/otelarrowexporter/internal/arrow/bestofn.go b/exporter/otelarrowexporter/internal/arrow/bestofn.go index 443713cda815..d65f3f6c3f1b 100644 --- a/exporter/otelarrowexporter/internal/arrow/bestofn.go +++ b/exporter/otelarrowexporter/internal/arrow/bestofn.go @@ -30,7 +30,7 @@ type bestOfNPrioritizer struct { // state tracks the work being handled by all streams. state []*streamWorkState - // numChoices is the number of streams to consder in each decision. + // numChoices is the number of streams to consider in each decision. numChoices int // loadFunc is the load function. @@ -71,7 +71,7 @@ func newBestOfNPrioritizer(dc doneCancel, numChoices, numStreams int, lf loadFun } for i := 0; i < numStreams; i++ { - // TODO It's not clear if/when the the prioritizer can + // TODO It's not clear if/when the prioritizer can // become a bottleneck. go lp.run() } diff --git a/exporter/otelarrowexporter/internal/arrow/exporter_test.go b/exporter/otelarrowexporter/internal/arrow/exporter_test.go index a182d02ecc45..34bcfaa1f71b 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter_test.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter_test.go @@ -301,7 +301,7 @@ func TestArrowExporterTimeout(t *testing.T) { } } -// TestConnectError tests that if the connetions fail fast the +// TestConnectError tests that if the connections fail fast the // stream object for some reason is nil. This causes downgrade. func TestArrowExporterStreamConnectError(t *testing.T) { for _, pname := range AllPrioritizers { diff --git a/exporter/otelarrowexporter/internal/arrow/stream.go b/exporter/otelarrowexporter/internal/arrow/stream.go index c6251b7ad249..f44ebef9abc7 100644 --- a/exporter/otelarrowexporter/internal/arrow/stream.go +++ b/exporter/otelarrowexporter/internal/arrow/stream.go @@ -328,7 +328,7 @@ func (s *Stream) encodeAndSend(wri writeItem, hdrsBuf *bytes.Buffer, hdrsEnc *hp if err != nil { // This case is like the encode-failure case // above, we will restart the stream but consider - // this a permenent error. + // this a permanent error. err = status.Errorf(codes.Internal, "hpack: %v", err) wri.errCh <- err return err @@ -380,7 +380,7 @@ func (s *Stream) read(_ context.Context) error { } } -// getSenderChannel takes the stream lock and removes the corresonding +// getSenderChannel takes the stream lock and removes the corresponding // sender channel. func (sws *streamWorkState) getSenderChannel(bstat *arrowpb.BatchStatus) (chan<- error, error) { sws.lock.Lock() diff --git a/exporter/prometheusexporter/README.md b/exporter/prometheusexporter/README.md index 82d10371d28f..790b6a526d61 100644 --- a/exporter/prometheusexporter/README.md +++ b/exporter/prometheusexporter/README.md @@ -19,7 +19,7 @@ Exports data in the [Prometheus format](https://prometheus.io/docs/concepts/data The following settings are required: -- `endpoint` (no default): the address on which metrics will be exposed, using path `/metrics`. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). +- `endpoint` (no default): the address on which metrics will be exposed, using path `/metrics`. 
For more info, see the [full list of `ServerConfig` options](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). The following settings can be optionally configured: diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go index 3a6559118fe5..2089a41953d5 100644 --- a/exporter/prometheusexporter/accumulator.go +++ b/exporter/prometheusexporter/accumulator.go @@ -29,9 +29,9 @@ type accumulatedValue struct { scope pcommon.InstrumentationScope } -// accumulator stores aggragated values of incoming metrics +// accumulator stores aggregated values of incoming metrics type accumulator interface { - // Accumulate stores aggragated metric values + // Accumulate stores aggregated metric values Accumulate(resourceMetrics pmetric.ResourceMetrics) (processed int) // Collect returns a slice with relevant aggregated metrics and their resource attributes. // The number or metrics and attributes returned will be the same. diff --git a/exporter/prometheusremotewriteexporter/DESIGN.md b/exporter/prometheusremotewriteexporter/DESIGN.md index 36946ddccd5f..32ece531ec79 100644 --- a/exporter/prometheusremotewriteexporter/DESIGN.md +++ b/exporter/prometheusremotewriteexporter/DESIGN.md @@ -225,7 +225,7 @@ This method creates the default configuration for Prometheus remote write/Cortex createMetricsExporter -This method constructs a new http.Client with interceptors that add headers to any request it sends. Then, this method initializes a new Prometheus remote write exporter/Cortex exporter with the http.Client. This method constructs a collector Prometheus remote write/Cortex exporter exporter with the created SDK exporter +This method constructs a new http.Client with interceptors that add headers to any request it sends. Then, this method initializes a new Prometheus remote write exporter/Cortex exporter with the http.Client. This method constructs a collector Prometheus remote write/Cortex exporter with the created SDK exporter diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index cb3b88986930..13cf3b252cc3 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -144,7 +144,7 @@ func newPRWExporter(cfg *Config, set exporter.Settings) (*prwExporter, error) { SendMetadata: cfg.SendMetadata, }, telemetry: prwTelemetry, - batchStatePool: sync.Pool{New: func() any { return newBatchTimeSericesState() }}, + batchStatePool: sync.Pool{New: func() any { return newBatchTimeSeriesState() }}, } if prwe.exporterSettings.ExportCreatedMetric { diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index ae8d930edf88..69721010bbd6 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -164,7 +164,7 @@ func Test_Start(t *testing.T) { clientConfigTLS.Endpoint = "https://some.url:9411/api/prom/push" clientConfigTLS.TLSSetting = configtls.ClientConfig{ Config: configtls.Config{ - CAFile: "non-existent file", + CAFile: "nonexistent file", CertFile: "", KeyFile: "", }, @@ -997,7 +997,7 @@ func TestWALOnExporterRoundTrip(t *testing.T) { errs := prwe.handleExport(ctx, tsMap, nil) assert.NoError(t, errs) // Shutdown after we've written to the WAL. This ensures that our - // exported data in-flight will flushed flushed to the WAL before exiting.
+ // exported data in-flight will be flushed to the WAL before exiting. require.NoError(t, prwe.Shutdown(ctx)) // 3. Let's now read back all of the WAL records and ensure diff --git a/exporter/prometheusremotewriteexporter/factory_test.go b/exporter/prometheusremotewriteexporter/factory_test.go index 8196d5baaf8f..435908ff1328 100644 --- a/exporter/prometheusremotewriteexporter/factory_test.go +++ b/exporter/prometheusremotewriteexporter/factory_test.go @@ -30,7 +30,7 @@ func Test_createMetricsExporter(t *testing.T) { invalidTLSConfig := createDefaultConfig().(*Config) invalidTLSConfig.ClientConfig.TLSSetting = configtls.ClientConfig{ Config: configtls.Config{ - CAFile: "non-existent file", + CAFile: "nonexistent file", CertFile: "", KeyFile: "", }, diff --git a/exporter/prometheusremotewriteexporter/helper.go b/exporter/prometheusremotewriteexporter/helper.go index 26def2570eff..e073099b98e0 100644 --- a/exporter/prometheusremotewriteexporter/helper.go +++ b/exporter/prometheusremotewriteexporter/helper.go @@ -19,7 +19,7 @@ type batchTimeSeriesState struct { nextRequestBufferSize int } -func newBatchTimeSericesState() *batchTimeSeriesState { +func newBatchTimeSeriesState() *batchTimeSeriesState { return &batchTimeSeriesState{ nextTimeSeriesBufferSize: math.MaxInt, nextMetricMetadataBufferSize: math.MaxInt, @@ -95,7 +95,7 @@ func batchTimeSeries(tsMap map[string]*prompb.TimeSeries, maxBatchByteSize int, func convertTimeseriesToRequest(tsArray []prompb.TimeSeries) *prompb.WriteRequest { // the remote_write endpoint only requires the timeseries. - // otlp defines it's own way to handle metric metadata + // otlp defines its own way to handle metric metadata return &prompb.WriteRequest{ // Prometheus requires time series to be sorted by Timestamp to avoid out of order problems.
// See: diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go index d45704d43b1c..46a61735a1c7 100644 --- a/exporter/prometheusremotewriteexporter/helper_test.go +++ b/exporter/prometheusremotewriteexporter/helper_test.go @@ -58,7 +58,7 @@ func Test_batchTimeSeries(t *testing.T) { // run tests for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - state := newBatchTimeSericesState() + state := newBatchTimeSeriesState() requests, err := batchTimeSeries(tt.tsMap, tt.maxBatchByteSize, nil, state) if tt.returnErr { assert.Error(t, err) @@ -96,7 +96,7 @@ func Test_batchTimeSeriesUpdatesStateForLargeBatches(t *testing.T) { tsMap1 := getTimeseriesMap(tsArray) - state := newBatchTimeSericesState() + state := newBatchTimeSeriesState() requests, err := batchTimeSeries(tsMap1, 1000000, nil, state) assert.NoError(t, err) @@ -129,7 +129,7 @@ func Benchmark_batchTimeSeries(b *testing.B) { b.ReportAllocs() b.ResetTimer() - state := newBatchTimeSericesState() + state := newBatchTimeSeriesState() // Run batchTimeSeries 100 times with a 1mb max request size for i := 0; i < b.N; i++ { requests, err := batchTimeSeries(tsMap1, 1000000, nil, state) diff --git a/exporter/rabbitmqexporter/internal/publisher/publisher_test.go b/exporter/rabbitmqexporter/internal/publisher/publisher_test.go index 96b2c99ad6d6..5bdf1103aaba 100644 --- a/exporter/rabbitmqexporter/internal/publisher/publisher_test.go +++ b/exporter/rabbitmqexporter/internal/publisher/publisher_test.go @@ -293,7 +293,7 @@ func resetCall(t *testing.T, calls []*mock.Call, methodName string) { return } } - t.Errorf("Faild to reset method %s", methodName) + t.Errorf("Failed to reset method %s", methodName) t.FailNow() } diff --git a/exporter/sapmexporter/README.md b/exporter/sapmexporter/README.md index b3e612898cc0..f97dbdd2f945 100644 --- a/exporter/sapmexporter/README.md +++ b/exporter/sapmexporter/README.md @@ -27,7 +27,7 @@ exporters: ``` The SAPM exporter builds on the Jaeger proto and adds additional batching on top. This allows the collector to export traces from multiples nodes/services in a single batch. The SAPM proto -and some useful related utilities can be found [here](https://github.com/signalfx/sapm-proto/). +and some useful related utilities are in [signalfx/sapm-proto](https://github.com/signalfx/sapm-proto/). > Please review the Collector's [security > documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md), @@ -38,9 +38,9 @@ and some useful related utilities is in [signalfx/sapm-proto](https://github.com/signalf The following configuration options are required: -- `access_token` (no default): AccessToken is the authentication token provided by Splunk Observability Cloud or +- `access_token` (no default): AccessToken is the [authentication token provided by Splunk Observability Cloud](https://docs.splunk.com/observability/en/admin/authentication/authentication-tokens/manage-usage.html) or another backend that supports the SAPM proto. The access token can be obtained from the -web app. For details on how to do so please refer the documentation [here](https://docs.splunk.com/observability/en/admin/authentication/authentication-tokens/manage-usage.html). +web app. - `endpoint` (no default): This is the destination to where traces will be sent to in SAPM format. It must be a full URL and include the scheme, port and path e.g, https://ingest.us0.signalfx.com/v2/trace.
This can be pointed to the SignalFx @@ -67,8 +67,7 @@ In addition to setting this option to `true`, debug logging at the Collector lev compressed and `compression` option is ignored. In addition, this exporter offers queued retry which is enabled by default. -Information about queued retry configuration parameters can be found -[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). +For more info, see the [exporterhelper configuration parameters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). Example: @@ -83,8 +82,7 @@ exporters: log_detailed_response: true ``` -The full list of settings exposed for this exporter are documented [here](config.go) -with detailed sample configurations [here](testdata/config.yaml). +The full list of settings exposed for this exporter are documented in [config.go](./config.go) +with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). -This exporter also offers proxy support as documented -[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). +This exporter also offers [proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). diff --git a/exporter/sapmexporter/examples/signalfx-k8s.yaml b/exporter/sapmexporter/examples/signalfx-k8s.yaml index d660306fe566..84863dbf4db5 100644 --- a/exporter/sapmexporter/examples/signalfx-k8s.yaml +++ b/exporter/sapmexporter/examples/signalfx-k8s.yaml @@ -44,7 +44,7 @@ data: #attributes/copyfromexistingkey: #actions: #- key: environment - #from_attribute: YOUR_EXISTING_TAG_NAMEE + #from_attribute: YOUR_EXISTING_TAG_NAME #action: upsert # Optional: If you want to add an environment tag # If this option is enabled it must be added to the pipeline section below diff --git a/exporter/sapmexporter/exporter_test.go b/exporter/sapmexporter/exporter_test.go index c922ec65a127..bfe82ee6a076 100644 --- a/exporter/sapmexporter/exporter_test.go +++ b/exporter/sapmexporter/exporter_test.go @@ -232,7 +232,7 @@ func TestSAPMClientTokenAccess(t *testing.T) { accessTokenPassthrough: true, }, { - name: "Token in config wihout passthrough", + name: "Token in config without passthrough", inContext: false, accessTokenPassthrough: false, }, diff --git a/exporter/sentryexporter/config.go b/exporter/sentryexporter/config.go index 32783fe4917e..b3587bee33c6 100644 --- a/exporter/sentryexporter/config.go +++ b/exporter/sentryexporter/config.go @@ -12,7 +12,7 @@ type Config struct { // DSN to report transaction to Sentry. If the DSN is not set, no trace will be sent to Sentry. DSN string `mapstructure:"dsn"` // The deployment environment name, such as production or staging. - // Environments are case sensitive. The environment name can't contain newlines, spaces or forward slashes, + // Environments are case-sensitive. The environment name can't contain newlines, spaces or forward slashes, // can't be the string "None", or exceed 64 characters. 
Environment string `mapstructure:"environment"` // InsecureSkipVerify controls whether the client verifies the Sentry server certificate chain diff --git a/exporter/sentryexporter/docs/transformation.md b/exporter/sentryexporter/docs/transformation.md index c8ba4bb78e3c..40a8823b2392 100644 --- a/exporter/sentryexporter/docs/transformation.md +++ b/exporter/sentryexporter/docs/transformation.md @@ -1,11 +1,9 @@ # OpenTelemetry to Sentry Transformation -This document aims to define the transformations between an OpenTelemetry span and a Sentry Span. It will also describe how a Sentry transaction is created from a set of Sentry spans. +This document aims to define the transformations between an OpenTelemetry span and a [Sentry Span](https://develop.sentry.dev/sdk/event-payloads/span/). It will also describe how a Sentry transaction is created from a set of Sentry spans. ## Spans -The interface for a Sentry Span can be found [here](https://develop.sentry.dev/sdk/event-payloads/span/) - | Sentry | OpenTelemetry | Notes | | ------------------- | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | | Span.TraceID | Span.TraceID | | @@ -18,7 +16,7 @@ The interface for a Sentry Span can be found [here](https://develop.sentry.dev/s | Span.EndTimestamp | span.EndTime | | | Span.Status | Span.Status | | -As can be seen by the table above, the OpenTelemetry span and Sentry span map fairly reasonably. Currently the OpenTelemtry `Span.Link` and `Span.TraceState` properties are not used when constructing a `SentrySpan` +As can be seen by the table above, the OpenTelemetry span and Sentry span map fairly reasonably. Currently the OpenTelemetry `Span.Link` and `Span.TraceState` properties are not used when constructing a `SentrySpan` ## Transactions @@ -34,7 +32,7 @@ After this first iteration, we are left with two structures, an array of transac We can then try again to classify these orphan spans, but if not possible, we can assume these orphan spans to be a root span (as we could not find their parent in the trace). Those root spans generated from orphan spans can be also be then used to create their respective transactions. -The interface for a Sentry Transaction can be found [here](https://develop.sentry.dev/sdk/event-payloads/transaction/) +For more information, see the [interface for a Sentry Transaction](https://develop.sentry.dev/sdk/event-payloads/transaction/) | Sentry | Used to generate | | ----------------------------- | ---------------------------------------------- | diff --git a/exporter/signalfxexporter/README.md b/exporter/signalfxexporter/README.md index 043ef34e4590..0c53840d6e92 100644 --- a/exporter/signalfxexporter/README.md +++ b/exporter/signalfxexporter/README.md @@ -23,9 +23,10 @@ supported. The following configuration options are required: -- `access_token` (no default): The access token is the authentication token - provided by Splunk Observability Cloud. The access token can be obtained from the - web app. For details on how to do so please refer the documentation [here](https://docs.splunk.com/observability/en/admin/authentication/authentication-tokens/manage-usage.html). +- `access_token` (no default): The access token is the [authentication token + provided by Splunk Observability + Cloud](https://docs.splunk.com/observability/en/admin/authentication/authentication-tokens/manage-usage.html). + The access token can be obtained from the web app. 
- Either `realm` or both `api_url` and `ingest_url`. Both `api_url` and `ingest_url` take precedence over `realm`. - `realm` (no default): SignalFx realm where the data will be received. @@ -56,13 +57,13 @@ The following configuration options can also be configured: - `exclude_metrics`: List of metric filters that will determine metrics to be excluded from sending to Signalfx backend. The filtering is applied after the default translations controlled by `disable_default_translation_rules` option. - See [here](./testdata/config.yaml) for examples. Apart from the values explicitly - provided via this option, by default, [these](./internal/translation/default_metrics.go) are + See in [testdata/config.yaml](./testdata/config.yaml) for examples. Apart from the values explicitly + provided via this option, by default, [default metrics](./internal/translation/default_metrics.go) are also appended to this list. Setting this option to `[]` will override all the default excludes. - `include_metrics`: List of filters to override exclusion of any metrics. This option can be used to included metrics that are otherwise dropped by - default. See [here](./internal/translation/default_metrics.go) for a list of metrics + default. See [default metrics](./internal/translation/default_metrics.go) for a list of metrics that are dropped by default. For example, the following configuration can be used to send through some of that are dropped by default. ```yaml @@ -145,8 +146,7 @@ will be replaced with a `_`. - `drop_histogram_buckets`: (default = `false`) if set to true, histogram buckets will not be translated into datapoints with `_bucket` suffix but will be dropped instead, only datapoints with `_sum`, `_count`, `_min` (optional) and `_max` (optional) suffixes will be sent. Please note that this option does not apply to histograms sent in OTLP format with `send_otlp_histograms` enabled. - `send_otlp_histograms`: (default: `false`) if set to true, any histogram metrics receiver by the exporter will be sent to Splunk Observability backend in OTLP format without conversion to SignalFx format. This can only be enabled if the Splunk Observability environment (realm) has the new Histograms feature rolled out. Please note that histograms sent in OTLP format do not apply to the exporter configurations `include_metrics` and `exclude_metrics`. In addition, this exporter offers queued retry which is enabled by default. -Information about queued retry configuration parameters can be found -[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). +For more information, see the queued retry options in the [exporter documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). ## Traces Configuration (correlation only) @@ -210,7 +210,7 @@ help ensure compatibility with custom charts and dashboards when using the OpenT The rule language is expressed in yaml mappings and is [documented here](./internal/translation/translator.go). 
Translation rules currently allow the following actions: * `aggregate_metric` - Aggregates a metric through removal of specified dimensions -* `calculate_new_metric` - Creates a new metric via operating on two consistuent ones +* `calculate_new_metric` - Creates a new metric via operating on two constituent ones * `convert_values` - Convert float values to int or int to float for specified metric names * `copy_metrics` - Creates a new metric as a copy of another * `delta_metric` - Creates a new delta metric for a specified non-delta one @@ -300,11 +300,10 @@ service: exporters: [signalfx] ``` -The full list of settings exposed for this exporter are documented [here](config.go) -with detailed sample configurations [here](testdata/config.yaml). +The full list of settings exposed for this exporter are documented in [config.go](./config.go) +with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). -This exporter also offers proxy support as documented -[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). +This exporter also offers [proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). ## Advanced Configuration diff --git a/exporter/signalfxexporter/config.go b/exporter/signalfxexporter/config.go index 9703b9313db8..ab339795229b 100644 --- a/exporter/signalfxexporter/config.go +++ b/exporter/signalfxexporter/config.go @@ -74,7 +74,7 @@ type Config struct { // value takes precedence over the value of Realm APIURL string `mapstructure:"api_url"` - // api_tls needs to be set if the exporter's APIURL is pointing to a httforwarder extension + // api_tls needs to be set if the exporter's APIURL is pointing to a httpforwarder extension // with TLS enabled and using a self-signed certificate where its CA is not loaded in the system cert pool. 
APITLSSettings configtls.ClientConfig `mapstructure:"api_tls,omitempty"` diff --git a/exporter/signalfxexporter/internal/apm/correlations/client.go b/exporter/signalfxexporter/internal/apm/correlations/client.go index e9e592afe46a..e3ff158964fc 100644 --- a/exporter/signalfxexporter/internal/apm/correlations/client.go +++ b/exporter/signalfxexporter/internal/apm/correlations/client.go @@ -176,7 +176,7 @@ func (cc *Client) putRequestOnRetryChan(r *request) error { } // CorrelateCB is a call back invoked with Correlate requests -// it is not invoked if the reqeust is deduplicated, cancelled, or the client context is cancelled +// it is not invoked if the request is deduplicated, cancelled, or the client context is cancelled type CorrelateCB func(cor *Correlation, err error) // Correlate diff --git a/exporter/signalfxexporter/internal/apm/correlations/dedup.go b/exporter/signalfxexporter/internal/apm/correlations/dedup.go index 28e26dc483fe..8cc6deb1e63c 100644 --- a/exporter/signalfxexporter/internal/apm/correlations/dedup.go +++ b/exporter/signalfxexporter/internal/apm/correlations/dedup.go @@ -101,8 +101,8 @@ func (d *deduplicator) dedupCorrelate(r *request) bool { d.pendingCreateKeys[*r.Correlation] = elem // cancel any pending delete operations - deleteElem, pendindgDelete := d.pendingDeleteKeys[*r.Correlation] - if pendindgDelete { + deleteElem, pendingDelete := d.pendingDeleteKeys[*r.Correlation] + if pendingDelete { deleteElem.Value.(*request).cancel() d.pendingDeletes.Remove(deleteElem) delete(d.pendingDeleteKeys, *deleteElem.Value.(*request).Correlation) @@ -129,8 +129,8 @@ func (d *deduplicator) dedupDelete(r *request) bool { d.pendingDeleteKeys[*r.Correlation] = elem // cancel any pending create operations - createElem, pendindgCreate := d.pendingCreateKeys[*r.Correlation] - if pendindgCreate { + createElem, pendingCreate := d.pendingCreateKeys[*r.Correlation] + if pendingCreate { createElem.Value.(*request).cancel() d.pendingCreates.Remove(createElem) delete(d.pendingCreateKeys, *createElem.Value.(*request).Correlation) diff --git a/exporter/signalfxexporter/internal/apm/requests/requestcounter/counter_test.go b/exporter/signalfxexporter/internal/apm/requests/requestcounter/counter_test.go index 92a6d74cbdaf..4e292fc85335 100644 --- a/exporter/signalfxexporter/internal/apm/requests/requestcounter/counter_test.go +++ b/exporter/signalfxexporter/internal/apm/requests/requestcounter/counter_test.go @@ -32,7 +32,7 @@ func TestContextWithRequestCounter(t *testing.T) { // ensure increment on parent also increments child IncrementRequestCount(parent) - assert.Equal(t, uint32(3), GetRequestCount(parent), "parent context can still still increment counter") + assert.Equal(t, uint32(3), GetRequestCount(parent), "parent context can still increment counter") assert.Equal(t, uint32(3), GetRequestCount(child), "child context counter was incremented when parent was incremented") assert.Equal(t, uint32(3), GetRequestCount(ContextWithRequestCounter(parent)), "trying to get a context with a counter shouldn't not overwrite an existing counter") @@ -42,7 +42,7 @@ func TestContextWithRequestCounter(t *testing.T) { assert.Equal(t, uint32(0), GetRequestCount(parent), "parent context counter was reset") assert.Equal(t, uint32(0), GetRequestCount(child), "child context counter was reset") - // ensure no error when context with out counter is passed in to functions + // ensure no error when context without counter is passed in to functions todo := context.TODO() assert.False(t, counterExists(todo), "plain 
context shouldn't have a counter") assert.Equal(t, uint32(0), GetRequestCount(todo), "plain context should return count of 0") diff --git a/exporter/signalfxexporter/internal/translation/constants.go b/exporter/signalfxexporter/internal/translation/constants.go index e6f43ecc89b7..ffc02f035edc 100644 --- a/exporter/signalfxexporter/internal/translation/constants.go +++ b/exporter/signalfxexporter/internal/translation/constants.go @@ -295,7 +295,7 @@ translation_rules: without_dimensions: - device -## Calculate an extra disk_ops.total metric as number all all read and write operations happened since the last report. +## Calculate an extra disk_ops.total metric as number of all read and write operations happened since the last report. - action: copy_metrics mapping: system.disk.operations: sf_temp.disk.ops diff --git a/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go b/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go index 853f0445e0b3..e3437c5d849f 100644 --- a/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go +++ b/exporter/signalfxexporter/internal/translation/dpfilters/filterset.go @@ -5,7 +5,7 @@ package dpfilters // import "github.com/open-telemetry/opentelemetry-collector-c import sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" -// FilterSet is a collection of datapont filters, any one of which must match +// FilterSet is a collection of datapoint filters, any one of which must match // for a datapoint to be matched. type FilterSet struct { excludeFilters []*dataPointFilter diff --git a/exporter/signalfxexporter/internal/translation/translator.go b/exporter/signalfxexporter/internal/translation/translator.go index 324188e820ee..5dd4b7dccb76 100644 --- a/exporter/signalfxexporter/internal/translation/translator.go +++ b/exporter/signalfxexporter/internal/translation/translator.go @@ -46,7 +46,7 @@ const ( // ActionCopyMetrics copies metrics using Rule.Mapping. // Rule.DimensionKey and Rule.DimensionValues can be used to filter datapoints that must be copied, - // if these fields are set, only metics having a dimension with key == Rule.DimensionKey and + // if these fields are set, only metrics having a dimension with key == Rule.DimensionKey and // value in Rule.DimensionValues will be copied. ActionCopyMetrics Action = "copy_metrics" @@ -177,7 +177,7 @@ type Rule struct { // DimensionKey is used by "split_metric" translation rule action to specify dimension key // that will be used to translate the metric datapoints. Datapoints that don't have // the specified dimension key will not be translated. - // DimensionKey is also used by "copy_metrics" for filterring. + // DimensionKey is also used by "copy_metrics" for filtering. 
DimensionKey string `mapstructure:"dimension_key"` // DimensionValues is used by "copy_metrics" to filter out datapoints with dimensions values diff --git a/exporter/signalfxexporter/internal/translation/translator_test.go b/exporter/signalfxexporter/internal/translation/translator_test.go index 23fa4194074f..af47b59e2c53 100644 --- a/exporter/signalfxexporter/internal/translation/translator_test.go +++ b/exporter/signalfxexporter/internal/translation/translator_test.go @@ -1210,7 +1210,7 @@ func TestTranslateDataPoints(t *testing.T) { }, { Key: "dim2", - Value: "val2-aleternate", + Value: "val2-alternate", }, }, }, @@ -1280,7 +1280,7 @@ func TestTranslateDataPoints(t *testing.T) { Dimensions: []*sfxpb.Dimension{ { Key: "dim2", - Value: "val2-aleternate", + Value: "val2-alternate", }, }, }, diff --git a/exporter/splunkhecexporter/README.md b/exporter/splunkhecexporter/README.md index 29a25f8ac7fe..8477f2ad416d 100644 --- a/exporter/splunkhecexporter/README.md +++ b/exporter/splunkhecexporter/README.md @@ -74,11 +74,10 @@ The following configuration options can also be configured: - `telemetry/enabled` (default: false): Specifies whether to enable telemetry inside splunk hec exporter. - `telemetry/override_metrics_names` (default: empty map): Specifies the metrics name to overrides in splunk hec exporter. - `telemetry/extra_attributes` (default: empty map): Specifies the extra metrics attributes in splunk hec exporter. -- `batcher`(Experimental, disabled by default): Specifies batching configuration on the exporter. Information about the configuration can be found [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) +- `batcher`(Experimental, disabled by default): Specifies [batching configuration on the exporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). In addition, this exporter offers queued retry which is enabled by default. -Information about queued retry configuration parameters can be found -[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md). +For more information, see the queued retry options in the [exporter documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md).
If you are getting throttled due to high volume of events the collector might experience memory issues, in those cases it is recommended to change the queued retry [configuration](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration) to drop events more frequently, for example you can reduce the maximum amount of time spent trying to send a batch from 120s (default) to 60s: ```yaml @@ -145,11 +144,10 @@ exporters: custom_key: custom_value ``` -The full list of settings exposed for this exporter are documented [here](config.go) -with detailed sample configurations [here](testdata/config.yaml). +The full list of settings exposed for this exporter is documented in [config.go](./config.go) +with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml). -This exporter also offers proxy support as documented -[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). +This exporter also offers [proxy support](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support). ## Advanced Configuration diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index 24f611458dda..62c8f67508d3 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -1716,7 +1716,7 @@ func Benchmark_pushLogData_compressed_10_10_1024(b *testing.B) { benchPushLogData(b, 10, 10, 1024, true) } -// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batche +// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batch func Benchmark_pushLogData_compressed_10_10_8K(b *testing.B) { benchPushLogData(b, 10, 10, 8*1024, true) } @@ -1805,7 +1805,7 @@ func Benchmark_pushMetricData_compressed_10_10_1024(b *testing.B) { benchPushMetricData(b, 10, 10, 1024, true, false) } -// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batche +// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batch func Benchmark_pushMetricData_compressed_10_10_8K(b *testing.B) { benchPushMetricData(b, 10, 10, 8*1024, true, false) } @@ -1865,7 +1865,7 @@ func Benchmark_pushMetricData_compressed_10_10_1024_MultiMetric(b *testing.B) { benchPushMetricData(b, 10, 10, 1024, true, true) } -// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batche +// 10 resources, 10 records, 8Kb max HEC batch: 1 HEC batch func Benchmark_pushMetricData_compressed_10_10_8K_MultiMetric(b *testing.B) { benchPushMetricData(b, 10, 10, 8*1024, true, true) } diff --git a/exporter/splunkhecexporter/internal/integrationtestutils/config_helper.go b/exporter/splunkhecexporter/internal/integrationtestutils/config_helper.go index 86c0685e4c4d..98999663db9a 100644 --- a/exporter/splunkhecexporter/internal/integrationtestutils/config_helper.go +++ b/exporter/splunkhecexporter/internal/integrationtestutils/config_helper.go @@ -10,7 +10,7 @@ import ( "gopkg.in/yaml.v3" ) -var configFilePth = "./testdata/integration_tests_config.yaml" +var configFilePath = "./testdata/integration_tests_config.yaml" type IntegrationTestsConfig struct { Host string `yaml:"HOST"` @@ -28,7 +28,7 @@ type IntegrationTestsConfig struct { func GetConfigVariable(key string) string { // Read YAML file - fileData, err := os.ReadFile(configFilePth) + fileData, err := os.ReadFile(configFilePath) if err != nil { fmt.Println("Error reading file:", err) } @@ -70,7 +70,7 @@ func GetConfigVariable(key string) string { func SetConfigVariable(key string, value string) { // Read YAML file - fileData,
err := os.ReadFile(configFilePth) + fileData, err := os.ReadFile(configFilePath) if err != nil { fmt.Println("Error reading file:", err) } @@ -102,7 +102,7 @@ func SetConfigVariable(key string, value string) { } // Write yaml file - err = os.WriteFile(configFilePth, newData, 0o600) + err = os.WriteFile(configFilePath, newData, 0o600) if err != nil { fmt.Printf("Error writing file: %v", err) return diff --git a/exporter/sumologicexporter/README.md b/exporter/sumologicexporter/README.md index 0eac361cd157..b6463b3392c3 100644 --- a/exporter/sumologicexporter/README.md +++ b/exporter/sumologicexporter/README.md @@ -17,7 +17,7 @@ **This exporter is undergoing major changes right now.** -For some time we have been developing the [new Sumo Logic exporter](https://github.com/SumoLogic/sumologic-otel-collector/tree/main/pkg/exporter/sumologicexporter#sumo-logic-exporter) and now we are in the process of moving it into this repository. +We are in the process of [moving the Sumo Logic exporter into this repository](https://github.com/SumoLogic/sumologic-otel-collector/pull/1601). The following options are no longer supported: @@ -69,8 +69,7 @@ After the new exporter will be moved to this repository: ## Configuration This exporter supports sending logs and metrics data to [Sumo Logic](https://www.sumologic.com/). -Traces are exported using native otlphttp exporter as described -[here](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing) +Traces are exported using the [native otlphttp exporter](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing). Configuration is specified via the yaml in the following structure: diff --git a/exporter/sumologicexporter/exporter_test.go b/exporter/sumologicexporter/exporter_test.go index 3347deb7e1a9..e042b7db6f9a 100644 --- a/exporter/sumologicexporter/exporter_test.go +++ b/exporter/sumologicexporter/exporter_test.go @@ -234,7 +234,7 @@ func TestPartiallyFailed(t *testing.T) { assert.Equal(t, logsExpected, partial.Data()) } -func TestInvalidHTTPCLient(t *testing.T) { +func TestInvalidHTTPClient(t *testing.T) { clientConfig := confighttp.NewDefaultClientConfig() clientConfig.Endpoint = "test_endpoint" clientConfig.TLSSetting = configtls.ClientConfig{ diff --git a/exporter/sumologicexporter/prometheus_formatter.go b/exporter/sumologicexporter/prometheus_formatter.go index b33a18398b10..24d64ba279f7 100644 --- a/exporter/sumologicexporter/prometheus_formatter.go +++ b/exporter/sumologicexporter/prometheus_formatter.go @@ -96,7 +96,7 @@ func formatKeyValuePair(key []byte, value string) string { // stringsJoinAndSurround joins the strings in s slice using the separator adds front // to the front of the resulting string and back at the end. // -// This has a benefit over using the strings.Join() of using just one strings.Buidler +// This has a benefit over using the strings.Join() of using just one strings.Builder // instance and hence using less allocations to produce the final string. 
func stringsJoinAndSurround(s []string, separator, front, back string) string { switch len(s) { diff --git a/exporter/sumologicexporter/sender.go b/exporter/sumologicexporter/sender.go index d52cfcbcdb3f..6498e8fc8f84 100644 --- a/exporter/sumologicexporter/sender.go +++ b/exporter/sumologicexporter/sender.go @@ -213,7 +213,7 @@ func (s *sender) handleReceiverResponse(resp *http.Response) error { s.updateStickySessionCookie(resp) } - // API responds with a 200 or 204 with ConentLength set to 0 when all data + // API responds with a 200 or 204 with ContentLength set to 0 when all data // has been successfully ingested. if resp.ContentLength == 0 && (resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent) { return nil @@ -722,11 +722,11 @@ func (s *sender) recordMetrics(duration time.Duration, count int64, req *http.Re } func (s *sender) addStickySessionCookie(req *http.Request) { - currectCookieValue := s.stickySessionCookieFunc() - if currectCookieValue != "" { + currentCookieValue := s.stickySessionCookieFunc() + if currentCookieValue != "" { cookie := &http.Cookie{ Name: stickySessionKey, - Value: currectCookieValue, + Value: currentCookieValue, } req.AddCookie(cookie) } diff --git a/exporter/sumologicexporter/sender_test.go b/exporter/sumologicexporter/sender_test.go index ea7ada4f09d2..6b2ce1820e19 100644 --- a/exporter/sumologicexporter/sender_test.go +++ b/exporter/sumologicexporter/sender_test.go @@ -41,7 +41,7 @@ type senderTest struct { } // prepareSenderTest prepares sender test environment. -// Provided cfgOpts additionally configure the sender after the sendible default +// Provided cfgOpts additionally configure the sender after the sensible default // for tests have been applied. // The enclosed httptest.Server is closed automatically using test.Cleanup. func prepareSenderTest(t *testing.T, compression configcompression.Type, cb []func(w http.ResponseWriter, req *http.Request), cfgOpts ...func(*Config)) *senderTest { @@ -1284,7 +1284,7 @@ func TestSendMetricsSplitFailedAll(t *testing.T) { } func TestSendMetricsUnexpectedFormat(t *testing.T) { - // Expect no requestes + // Expect no requests test := prepareSenderTest(t, NoCompression, nil) test.s.config.MetricFormat = "invalid" diff --git a/exporter/syslogexporter/README.md b/exporter/syslogexporter/README.md index da84b645e1ff..a54e4b3f2bc3 100644 --- a/exporter/syslogexporter/README.md +++ b/exporter/syslogexporter/README.md @@ -94,7 +94,7 @@ And here's the output message based on the above log record: <34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - - - 'su root' failed for lonvick on /dev/pts/8 ``` -Here'a another example, this includes the structured data and other attributes: +Here's another example, which includes the structured data and other attributes: ```json { diff --git a/exporter/syslogexporter/config.go b/exporter/syslogexporter/config.go index a07526d3d565..35381b4d24e4 100644 --- a/exporter/syslogexporter/config.go +++ b/exporter/syslogexporter/config.go @@ -34,7 +34,7 @@ type Config struct { // options: rfc5424, rfc3164 Protocol string `mapstructure:"protocol"` - // Wether or not to enable RFC 6587 Octet Counting. + // Whether or not to enable RFC 6587 Octet Counting. EnableOctetCounting bool `mapstructure:"enable_octet_counting"` // TLSSetting struct exposes TLS client configuration. 
diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go index d8e57c495527..aa66674d5c21 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go @@ -43,7 +43,7 @@ func createLogData(numberOfLogs int) plog.Logs { logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() - rl.Resource().Attributes().PutStr("resouceKey", "resourceValue") + rl.Resource().Attributes().PutStr("resourceKey", "resourceValue") rl.Resource().Attributes().PutStr(conventions.AttributeServiceName, "test-log-service-exporter") rl.Resource().Attributes().PutStr(conventions.AttributeHostName, "test-host") sl := rl.ScopeLogs().AppendEmpty() diff --git a/exporter/tencentcloudlogserviceexporter/testdata/logservice_log_data.json b/exporter/tencentcloudlogserviceexporter/testdata/logservice_log_data.json index 6063ea49fc43..33e09a47c161 100644 --- a/exporter/tencentcloudlogserviceexporter/testdata/logservice_log_data.json +++ b/exporter/tencentcloudlogserviceexporter/testdata/logservice_log_data.json @@ -10,7 +10,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -64,7 +64,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -118,7 +118,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -172,7 +172,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -226,7 +226,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -280,7 +280,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -334,7 +334,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -388,7 +388,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name", @@ -442,7 +442,7 @@ }, { "Key": "resource", - "Value": "{\"resouceKey\":\"resourceValue\"}" + "Value": "{\"resourceKey\":\"resourceValue\"}" }, { "Key": "otlp.name",