From 7519c45053db0f1b21e165b95baa771763cb1759 Mon Sep 17 00:00:00 2001 From: alex boten <223565+codeboten@users.noreply.github.com> Date: Mon, 9 Feb 2026 13:03:20 -0800 Subject: [PATCH 1/5] otelconf: move experimental types to otelconf/x The otelconf package now only includes the subset of features that are stable in the configuration schema. The package otelconf/x must be used for anyone looking to utilize experimental types. Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- Makefile | 4 +- otelconf/config_json.go | 171 +-- otelconf/config_test.go | 359 +---- otelconf/config_yaml.go | 66 +- otelconf/generated_config.go | 747 ---------- otelconf/log.go | 4 - otelconf/log_test.go | 4 +- otelconf/metric.go | 102 -- otelconf/metric_test.go | 163 --- otelconf/resource.go | 22 - otelconf/trace.go | 4 - otelconf/trace_test.go | 4 +- otelconf/x/config.go | 215 +++ otelconf/x/config_common.go | 323 +++++ otelconf/x/config_json.go | 943 +++++++++++++ otelconf/x/config_test.go | 2379 ++++++++++++++++++++++++++++++++ otelconf/x/config_yaml.go | 553 ++++++++ otelconf/x/doc.go | 10 + otelconf/x/example_test.go | 49 + otelconf/x/fuzz_test.go | 64 + otelconf/x/generated_config.go | 2368 +++++++++++++++++++++++++++++++ otelconf/x/log.go | 234 ++++ otelconf/x/log_test.go | 906 ++++++++++++ otelconf/x/metric.go | 587 ++++++++ otelconf/x/metric_test.go | 1623 ++++++++++++++++++++++ otelconf/x/propagator.go | 58 + otelconf/x/propagator_test.go | 178 +++ otelconf/x/resource.go | 57 + otelconf/x/resource_test.go | 83 ++ otelconf/x/trace.go | 313 +++++ otelconf/x/trace_test.go | 1065 ++++++++++++++ 31 files changed, 12047 insertions(+), 1611 deletions(-) create mode 100644 otelconf/x/config.go create mode 100644 otelconf/x/config_common.go create mode 100644 otelconf/x/config_json.go create mode 100644 otelconf/x/config_test.go create mode 100644 otelconf/x/config_yaml.go create mode 100644 otelconf/x/doc.go create mode 100644 otelconf/x/example_test.go create 
mode 100644 otelconf/x/fuzz_test.go create mode 100644 otelconf/x/generated_config.go create mode 100644 otelconf/x/log.go create mode 100644 otelconf/x/log_test.go create mode 100644 otelconf/x/metric.go create mode 100644 otelconf/x/metric_test.go create mode 100644 otelconf/x/propagator.go create mode 100644 otelconf/x/propagator_test.go create mode 100644 otelconf/x/resource.go create mode 100644 otelconf/x/resource_test.go create mode 100644 otelconf/x/trace.go create mode 100644 otelconf/x/trace_test.go diff --git a/Makefile b/Makefile index 3bd1fbd3463..7646a64b851 100644 --- a/Makefile +++ b/Makefile @@ -326,7 +326,7 @@ OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR=tmp/opentelemetry-configuration genjsonschema-cleanup: rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} -GENERATED_CONFIG=./otelconf/generated_config.go +GENERATED_CONFIG=./otelconf/x/generated_config.go # Generate structs for configuration from opentelemetry-configuration schema genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) @@ -336,7 +336,7 @@ genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) --capitalization ID \ --capitalization OTLP \ --struct-name-from-title \ - --package otelconf \ + --package x \ --only-models \ --output ${GENERATED_CONFIG} \ ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/opentelemetry_configuration.json diff --git a/otelconf/config_json.go b/otelconf/config_json.go index 30695413faf..661d7802f32 100644 --- a/otelconf/config_json.go +++ b/otelconf/config_json.go @@ -122,121 +122,6 @@ func (j *TraceContextPropagator) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *ExperimentalContainerResourceDetector) UnmarshalJSON(b []byte) error { - type plain ExperimentalContainerResourceDetector - var p plain - if err := json.Unmarshal(b, &p); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - // If key is present (even if empty object), ensure non-nil value. 
- if p == nil { - *j = ExperimentalContainerResourceDetector{} - } else { - *j = ExperimentalContainerResourceDetector(p) - } - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ExperimentalHostResourceDetector) UnmarshalJSON(b []byte) error { - type plain ExperimentalHostResourceDetector - var p plain - if err := json.Unmarshal(b, &p); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - // If key is present (even if empty object), ensure non-nil value. - if p == nil { - *j = ExperimentalHostResourceDetector{} - } else { - *j = ExperimentalHostResourceDetector(p) - } - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ExperimentalProcessResourceDetector) UnmarshalJSON(b []byte) error { - type plain ExperimentalProcessResourceDetector - var p plain - if err := json.Unmarshal(b, &p); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - // If key is present (even if empty object), ensure non-nil value. - if p == nil { - *j = ExperimentalProcessResourceDetector{} - } else { - *j = ExperimentalProcessResourceDetector(p) - } - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ExperimentalServiceResourceDetector) UnmarshalJSON(b []byte) error { - type plain ExperimentalServiceResourceDetector - var p plain - if err := json.Unmarshal(b, &p); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - // If key is present (even if empty object), ensure non-nil value. - if p == nil { - *j = ExperimentalServiceResourceDetector{} - } else { - *j = ExperimentalServiceResourceDetector(p) - } - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ExperimentalResourceDetector) UnmarshalJSON(b []byte) error { - // Use a shadow struct with a RawMessage field to detect key presence. 
- type Plain ExperimentalResourceDetector - type shadow struct { - Plain - Container json.RawMessage `json:"container"` - Host json.RawMessage `json:"host"` - Process json.RawMessage `json:"process"` - Service json.RawMessage `json:"service"` - } - var sh shadow - if err := json.Unmarshal(b, &sh); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - - if sh.Container != nil { - var c ExperimentalContainerResourceDetector - if err := json.Unmarshal(sh.Container, &c); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - sh.Plain.Container = c - } - - if sh.Host != nil { - var c ExperimentalHostResourceDetector - if err := json.Unmarshal(sh.Host, &c); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - sh.Plain.Host = c - } - - if sh.Process != nil { - var c ExperimentalProcessResourceDetector - if err := json.Unmarshal(sh.Process, &c); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - sh.Plain.Process = c - } - - if sh.Service != nil { - var c ExperimentalServiceResourceDetector - if err := json.Unmarshal(sh.Service, &c); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - sh.Plain.Service = c - } - *j = ExperimentalResourceDetector(sh.Plain) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. func (j *PushMetricExporter) UnmarshalJSON(b []byte) error { // Use a shadow struct with a RawMessage field to detect key presence. 
@@ -433,16 +318,15 @@ func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { type Plain OpenTelemetryConfiguration type shadow struct { Plain - FileFormat json.RawMessage `json:"file_format"` - LoggerProvider json.RawMessage `json:"logger_provider"` - MeterProvider json.RawMessage `json:"meter_provider"` - TracerProvider json.RawMessage `json:"tracer_provider"` - Propagator json.RawMessage `json:"propagator"` - Resource json.RawMessage `json:"resource"` - InstrumentationDevelopment json.RawMessage `json:"instrumentation/development"` - AttributeLimits json.RawMessage `json:"attribute_limits"` - Disabled json.RawMessage `json:"disabled"` - LogLevel json.RawMessage `json:"log_level"` + FileFormat json.RawMessage `json:"file_format"` + LoggerProvider json.RawMessage `json:"logger_provider"` + MeterProvider json.RawMessage `json:"meter_provider"` + TracerProvider json.RawMessage `json:"tracer_provider"` + Propagator json.RawMessage `json:"propagator"` + Resource json.RawMessage `json:"resource"` + AttributeLimits json.RawMessage `json:"attribute_limits"` + Disabled json.RawMessage `json:"disabled"` + LogLevel json.RawMessage `json:"log_level"` } var sh shadow if err := json.Unmarshal(b, &sh); err != nil { @@ -497,14 +381,6 @@ func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { sh.Plain.Resource = &r } - if sh.InstrumentationDevelopment != nil { - var r ExperimentalInstrumentation - if err := json.Unmarshal(sh.InstrumentationDevelopment, &r); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - sh.Plain.InstrumentationDevelopment = &r - } - if sh.AttributeLimits != nil { var r AttributeLimits if err := json.Unmarshal(sh.AttributeLimits, &r); err != nil { @@ -846,35 +722,6 @@ func (j *InstrumentType) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *ExperimentalPeerServiceMapping) UnmarshalJSON(b []byte) error { - type Plain ExperimentalPeerServiceMapping - type shadow struct { - Plain - Peer json.RawMessage `json:"peer"` - Service json.RawMessage `json:"service"` - } - var sh shadow - if err := json.Unmarshal(b, &sh); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - if sh.Peer == nil { - return newErrRequired(j, "peer") - } - if err := json.Unmarshal(sh.Peer, &sh.Plain.Peer); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - if sh.Service == nil { - return newErrRequired(j, "service") - } - if err := json.Unmarshal(sh.Service, &sh.Plain.Service); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - - *j = ExperimentalPeerServiceMapping(sh.Plain) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. func (j *ExporterDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { var v string diff --git a/otelconf/config_test.go b/otelconf/config_test.go index 0b0471de780..8ba54227d12 100644 --- a/otelconf/config_test.go +++ b/otelconf/config_test.go @@ -437,96 +437,9 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ AttributeCountLimit: ptr(128), AttributeValueLengthLimit: ptr(4096), }, - InstrumentationDevelopment: &ExperimentalInstrumentation{ - Cpp: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Dotnet: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Erlang: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - General: &ExperimentalGeneralInstrumentation{ - Http: &ExperimentalHttpInstrumentation{ - Client: &ExperimentalHttpClientInstrumentation{ - RequestCapturedHeaders: []string{"Content-Type", "Accept"}, - ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, - }, - Server: &ExperimentalHttpServerInstrumentation{ - RequestCapturedHeaders: 
[]string{"Content-Type", "Accept"}, - ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, - }, - }, - Peer: &ExperimentalPeerInstrumentation{ - ServiceMapping: []ExperimentalPeerServiceMapping{ - {Peer: "1.2.3.4", Service: "FooService"}, - {Peer: "2.3.4.5", Service: "BarService"}, - }, - }, - }, - Go: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Java: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Js: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Php: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Python: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Ruby: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Rust: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - Swift: ExperimentalLanguageSpecificInstrumentation{ - "example": map[string]any{ - "property": "value", - }, - }, - }, + LogLevel: ptr(SeverityNumberInfo), LoggerProvider: &LoggerProvider{ - LoggerConfiguratorDevelopment: &ExperimentalLoggerConfigurator{ - DefaultConfig: &ExperimentalLoggerConfig{ - Disabled: ptr(true), - }, - Loggers: []ExperimentalLoggerMatcherAndConfig{ - { - Config: ExperimentalLoggerConfig{ - Disabled: ptr(false), - }, - Name: "io.opentelemetry.contrib.*", - }, - }, - }, Limits: &LogRecordLimits{ AttributeCountLimit: ptr(128), AttributeValueLengthLimit: ptr(4096), @@ -580,20 +493,12 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ }, { Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ - OutputStream: ptr("file:///var/log/logs.jsonl"), - }, - }, + Exporter: 
LogRecordExporter{}, }, }, { Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ - OutputStream: ptr("stdout"), - }, - }, + Exporter: LogRecordExporter{}, }, }, { @@ -607,19 +512,6 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ }, MeterProvider: &MeterProvider{ ExemplarFilter: ptr(ExemplarFilter("trace_based")), - MeterConfiguratorDevelopment: &ExperimentalMeterConfigurator{ - DefaultConfig: &ExperimentalMeterConfig{ - Disabled: ptr(true), - }, - Meters: []ExperimentalMeterMatcherAndConfig{ - { - Config: ExperimentalMeterConfig{ - Disabled: ptr(false), - }, - Name: "io.opentelemetry.contrib.*", - }, - }, - }, Readers: []MetricReader{ { Pull: &PullMetricReader{ @@ -638,18 +530,7 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ ObservableUpDownCounter: ptr(2000), UpDownCounter: ptr(2000), }, - Exporter: PullMetricExporter{ - PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ - Host: ptr("localhost"), - Port: ptr(9464), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), - WithResourceConstantLabels: &IncludeExclude{ - Excluded: []string{"service.attr1"}, - Included: []string{"service*"}, - }, - WithoutScopeInfo: ptr(false), - }, - }, + Exporter: PullMetricExporter{}, }, }, { @@ -719,24 +600,12 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ }, { Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ - OutputStream: ptr("file:///var/log/metrics.jsonl"), - DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), - TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), - }, - }, + Exporter: PushMetricExporter{}, }, }, { Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ - OutputStream: ptr("stdout"), - 
DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), - TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), - }, - }, + Exporter: PushMetricExporter{}, }, }, { @@ -811,34 +680,8 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ {Name: "double_array_key", Type: ptr(AttributeTypeDoubleArray), Value: []any{1.1, 2.2}}, }, AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), - DetectionDevelopment: &ExperimentalResourceDetection{ - Attributes: &IncludeExclude{ - Excluded: []string{"process.command_args"}, - Included: []string{"process.*"}, - }, - Detectors: []ExperimentalResourceDetector{ - {Container: ExperimentalContainerResourceDetector{}}, - {Host: ExperimentalHostResourceDetector{}}, - {Process: ExperimentalProcessResourceDetector{}}, - {Service: ExperimentalServiceResourceDetector{}}, - }, - }, }, TracerProvider: &TracerProvider{ - TracerConfiguratorDevelopment: &ExperimentalTracerConfigurator{ - DefaultConfig: &ExperimentalTracerConfig{ - Disabled: ptr(true), - }, - Tracers: []ExperimentalTracerMatcherAndConfig{ - { - Config: ExperimentalTracerConfig{ - Disabled: ptr(false), - }, - Name: "io.opentelemetry.contrib.*", - }, - }, - }, - Limits: &SpanLimits{ AttributeCountLimit: ptr(128), AttributeValueLengthLimit: ptr(4096), @@ -896,20 +739,12 @@ var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ }, { Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ - OutputStream: ptr("file:///var/log/traces.jsonl"), - }, - }, + Exporter: SpanExporter{}, }, }, { Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ - OutputStream: ptr("stdout"), - }, - }, + Exporter: SpanExporter{}, }, }, { @@ -1117,12 +952,6 @@ func TestUnmarshalOpenTelemetryConfiguration(t *testing.T) { yamlConfig: []byte("attribute_limits: []\nfile_format: 1.0"), wantErr: 
newErrUnmarshal(&OpenTelemetryConfiguration{}), }, - { - name: "instrumentation invalid", - jsonConfig: []byte(`{"instrumentation/development":[], "file_format": "1.0"}`), - yamlConfig: []byte("instrumentation/development: []\nfile_format: 1.0"), - wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), - }, { name: "log_level invalid", jsonConfig: []byte(`{"log_level":[], "file_format": "1.0"}`), @@ -2112,68 +1941,6 @@ func TestUnmarshalInstrumentType(t *testing.T) { } } -func TestUnmarshalExperimentalPeerServiceMappingType(t *testing.T) { - for _, tt := range []struct { - name string - yamlConfig []byte - jsonConfig []byte - wantErrT error - wantExperimentalPeerServiceMapping ExperimentalPeerServiceMapping - }{ - { - name: "invalid data", - jsonConfig: []byte(`{:2000}`), - yamlConfig: []byte("peer: []\nservice: true"), - wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), - }, - { - name: "missing required peer field", - jsonConfig: []byte(`{}`), - yamlConfig: []byte("{}"), - wantErrT: newErrRequired(&ExperimentalPeerServiceMapping{}, "peer"), - }, - { - name: "missing required service field", - jsonConfig: []byte(`{"peer":"test"}`), - yamlConfig: []byte("peer: test"), - wantErrT: newErrRequired(&ExperimentalPeerServiceMapping{}, "service"), - }, - { - name: "invalid string_array peer", - jsonConfig: []byte(`{"peer":[], "service": ["test-val", "test-val-2"], "type": "string_array"}`), - yamlConfig: []byte("peer: []\nservice: [test-val, test-val-2]\ntype: string_array\n"), - wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), - }, - { - name: "valid string service", - jsonConfig: []byte(`{"peer":"test", "service": "test-val"}`), - yamlConfig: []byte("peer: test\nservice: test-val"), - wantExperimentalPeerServiceMapping: ExperimentalPeerServiceMapping{ - Peer: "test", - Service: "test-val", - }, - }, - { - name: "invalid string_array service", - jsonConfig: []byte(`{"peer":"test", "service": ["test-val", "test-val-2"], "type": 
"string_array"}`), - yamlConfig: []byte("peer: test\nservice: [test-val, test-val-2]\ntype: string_array\n"), - wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), - }, - } { - t.Run(tt.name, func(t *testing.T) { - val := ExperimentalPeerServiceMapping{} - err := val.UnmarshalJSON(tt.jsonConfig) - assert.ErrorIs(t, err, tt.wantErrT) - assert.Equal(t, tt.wantExperimentalPeerServiceMapping, val) - - val = ExperimentalPeerServiceMapping{} - err = yaml.Unmarshal(tt.yamlConfig, &val) - assert.ErrorIs(t, err, tt.wantErrT) - assert.Equal(t, tt.wantExperimentalPeerServiceMapping, val) - }) - } -} - func TestUnmarshalExporterDefaultHistogramAggregation(t *testing.T) { var exporterDefaultHistogramAggregation ExporterDefaultHistogramAggregation for _, tt := range []struct { @@ -2228,7 +1995,7 @@ func TestUnmarshalPullMetricReader(t *testing.T) { name: "valid with proemtheus exporter", jsonConfig: []byte(`{"exporter":{"prometheus/development":{}}}`), yamlConfig: []byte("exporter:\n prometheus/development: {}"), - wantExporter: PullMetricExporter{PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{}}, + wantExporter: PullMetricExporter{}, }, { name: "missing required exporter field", @@ -2236,12 +2003,6 @@ func TestUnmarshalPullMetricReader(t *testing.T) { yamlConfig: []byte("{}"), wantErrT: newErrRequired(&PullMetricReader{}, "exporter"), }, - { - name: "invalid data", - jsonConfig: []byte(`{:2000}`), - yamlConfig: []byte("exporter:\n prometheus/development: []"), - wantErrT: newErrUnmarshal(&PullMetricReader{}), - }, } { t.Run(tt.name, func(t *testing.T) { cl := PullMetricReader{} @@ -2266,102 +2027,16 @@ func TestUnmarshalResourceJson(t *testing.T) { wantResource Resource }{ { - name: "valid with all detectors", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": null},{"host": null},{"process": null},{"service": null}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - container:\n - host:\n - process:\n - 
service:"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - { - Container: ExperimentalContainerResourceDetector{}, - }, - { - Host: ExperimentalHostResourceDetector{}, - }, - { - Process: ExperimentalProcessResourceDetector{}, - }, - { - Service: ExperimentalServiceResourceDetector{}, - }, - }, - }, - }, - }, - { - name: "valid non-nil with all detectors", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": {}},{"host": {}},{"process": {}},{"service": {}}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - container: {}\n - host: {}\n - process: {}\n - service: {}"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - { - Container: ExperimentalContainerResourceDetector{}, - }, - { - Host: ExperimentalHostResourceDetector{}, - }, - { - Process: ExperimentalProcessResourceDetector{}, - }, - { - Service: ExperimentalServiceResourceDetector{}, - }, - }, - }, - }, - }, - { - name: "invalid container detector", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": 1}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - container: 1"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - {}, - }, - }, - }, - wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + name: "valid with all detectors", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": null},{"host": null},{"process": null},{"service": null}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - container:\n - host:\n - process:\n - service:"), + wantResource: Resource{}, }, { - name: "invalid host detector", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"host": 1}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - host: 
1"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - {}, - }, - }, - }, - wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), - }, - { - name: "invalid service detector", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"service": 1}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - service: 1"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - {}, - }, - }, - }, - wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), - }, - { - name: "invalid process detector", - jsonConfig: []byte(`{"detection/development": {"detectors": [{"process": 1}]}}`), - yamlConfig: []byte("detection/development:\n detectors:\n - process: 1"), - wantResource: Resource{ - DetectionDevelopment: &ExperimentalResourceDetection{ - Detectors: []ExperimentalResourceDetector{ - {}, - }, - }, - }, - wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + name: "valid non-nil with all detectors", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": {}},{"host": {}},{"process": {}},{"service": {}}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - container: {}\n - host: {}\n - process: {}\n - service: {}"), + wantResource: Resource{}, }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/otelconf/config_yaml.go b/otelconf/config_yaml.go index bcb3da50474..6f226afd9ee 100644 --- a/otelconf/config_yaml.go +++ b/otelconf/config_yaml.go @@ -27,31 +27,6 @@ func hasYAMLMapKey(node *yaml.Node, key string) bool { } // UnmarshalYAML implements yaml.Unmarshaler. 
-func (j *ExperimentalResourceDetector) UnmarshalYAML(node *yaml.Node) error { - type Plain ExperimentalResourceDetector - var plain Plain - if err := node.Decode(&plain); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - // container can be nil, must check and set here - if hasYAMLMapKey(node, "container") && plain.Container == nil { - plain.Container = ExperimentalContainerResourceDetector{} - } - // host can be nil, must check and set here - if hasYAMLMapKey(node, "host") && plain.Host == nil { - plain.Host = ExperimentalHostResourceDetector{} - } - // process can be nil, must check and set here - if hasYAMLMapKey(node, "process") && plain.Process == nil { - plain.Process = ExperimentalProcessResourceDetector{} - } - // service can be nil, must check and set here - if hasYAMLMapKey(node, "service") && plain.Service == nil { - plain.Service = ExperimentalServiceResourceDetector{} - } - *j = ExperimentalResourceDetector(plain) - return nil -} // UnmarshalYAML implements yaml.Unmarshaler. 
func (j *PushMetricExporter) UnmarshalYAML(node *yaml.Node) error { @@ -76,16 +51,15 @@ func (j *OpenTelemetryConfiguration) UnmarshalYAML(node *yaml.Node) error { type Plain OpenTelemetryConfiguration type shadow struct { Plain - LogLevel *SeverityNumber `yaml:"log_level,omitempty"` - AttributeLimits *AttributeLimits `yaml:"attribute_limits,omitempty"` - Disabled *bool `yaml:"disabled,omitempty"` - FileFormat string `yaml:"file_format"` - LoggerProvider *LoggerProvider `yaml:"logger_provider,omitempty"` - MeterProvider *MeterProvider `yaml:"meter_provider,omitempty"` - TracerProvider *TracerProvider `yaml:"tracer_provider,omitempty"` - Propagator *Propagator `yaml:"propagator,omitempty"` - Resource *Resource `yaml:"resource,omitempty"` - InstrumentationDevelopment *ExperimentalInstrumentation `yaml:"instrumentation/development"` + LogLevel *SeverityNumber `yaml:"log_level,omitempty"` + AttributeLimits *AttributeLimits `yaml:"attribute_limits,omitempty"` + Disabled *bool `yaml:"disabled,omitempty"` + FileFormat string `yaml:"file_format"` + LoggerProvider *LoggerProvider `yaml:"logger_provider,omitempty"` + MeterProvider *MeterProvider `yaml:"meter_provider,omitempty"` + TracerProvider *TracerProvider `yaml:"tracer_provider,omitempty"` + Propagator *Propagator `yaml:"propagator,omitempty"` + Resource *Resource `yaml:"resource,omitempty"` } var sh shadow @@ -120,9 +94,6 @@ func (j *OpenTelemetryConfiguration) UnmarshalYAML(node *yaml.Node) error { if sh.Resource != nil { sh.Plain.Resource = sh.Resource } - if sh.InstrumentationDevelopment != nil { - sh.Plain.InstrumentationDevelopment = sh.InstrumentationDevelopment - } if sh.LogLevel != nil { sh.Plain.LogLevel = sh.LogLevel @@ -504,25 +475,6 @@ func (j *InstrumentType) UnmarshalYAML(node *yaml.Node) error { return nil } -// UnmarshalYAML implements yaml.Unmarshaler. 
-func (j *ExperimentalPeerServiceMapping) UnmarshalYAML(node *yaml.Node) error { - if !hasYAMLMapKey(node, "peer") { - return newErrRequired(j, "peer") - } - if !hasYAMLMapKey(node, "service") { - return newErrRequired(j, "service") - } - - type Plain ExperimentalPeerServiceMapping - var plain Plain - if err := node.Decode(&plain); err != nil { - return errors.Join(newErrUnmarshal(j), err) - } - - *j = ExperimentalPeerServiceMapping(plain) - return nil -} - // UnmarshalYAML implements yaml.Unmarshaler. func (j *ExporterDefaultHistogramAggregation) UnmarshalYAML(node *yaml.Node) error { type Plain ExporterDefaultHistogramAggregation diff --git a/otelconf/generated_config.go b/otelconf/generated_config.go index b881c108698..d0d8ffe290a 100644 --- a/otelconf/generated_config.go +++ b/otelconf/generated_config.go @@ -361,690 +361,6 @@ const ExemplarFilterAlwaysOff ExemplarFilter = "always_off" const ExemplarFilterAlwaysOn ExemplarFilter = "always_on" const ExemplarFilterTraceBased ExemplarFilter = "trace_based" -type ExperimentalComposableAlwaysOffSampler map[string]interface{} - -type ExperimentalComposableAlwaysOnSampler map[string]interface{} - -type ExperimentalComposableParentThresholdSampler struct { - // Sampler to use when there is no parent. - // Property is required and must be non-null. - // - Root ExperimentalComposableSampler `json:"root" yaml:"root" mapstructure:"root"` -} - -type ExperimentalComposableProbabilitySampler struct { - // Configure ratio. - // If omitted or null, 1.0 is used. - // - Ratio ExperimentalComposableProbabilitySamplerRatio `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` -} - -// Configure ratio. -// If omitted or null, 1.0 is used. -type ExperimentalComposableProbabilitySamplerRatio *float64 - -type ExperimentalComposableRuleBasedSampler struct { - // The rules for the sampler, matched in order. If no rules match, the span is not - // sampled. - // If omitted or null, no span is sampled. 
- // - Rules *ExperimentalComposableRuleBasedSamplerRules `json:"rules,omitempty" yaml:"rules,omitempty" mapstructure:"rules,omitempty"` -} - -// A rule for ExperimentalComposableRuleBasedSampler. A rule can have multiple -// match conditions - the sampler will be applied if all match. -// If no conditions are specified, the rule matches all spans that reach it. -type ExperimentalComposableRuleBasedSamplerRule struct { - // Patterns to match against a single attribute. Non-string attributes are matched - // using their string representation: - // for example, a pattern of "4*" would match any http.response.status_code in - // 400-499. For array attributes, if any - // item matches, it is considered a match. - // If omitted, ignore. - // - AttributePatterns *ExperimentalComposableRuleBasedSamplerRuleAttributePatterns `json:"attribute_patterns,omitempty" yaml:"attribute_patterns,omitempty" mapstructure:"attribute_patterns,omitempty"` - - // Values to match against a single attribute. Non-string attributes are matched - // using their string representation: - // for example, a value of "404" would match the http.response.status_code 404. - // For array attributes, if any - // item matches, it is considered a match. - // If omitted, ignore. - // - AttributeValues *ExperimentalComposableRuleBasedSamplerRuleAttributeValues `json:"attribute_values,omitempty" yaml:"attribute_values,omitempty" mapstructure:"attribute_values,omitempty"` - - // The parent span types to match. - // Values include: - // * local: local, a local parent. - // * none: none, no parent, i.e., the trace root. - // * remote: remote, a remote parent. - // If omitted, ignore. - // - Parent []ExperimentalSpanParent `json:"parent,omitempty" yaml:"parent,omitempty" mapstructure:"parent,omitempty"` - - // The sampler to use for matching spans. - // Property is required and must be non-null. 
- // - Sampler ExperimentalComposableSampler `json:"sampler" yaml:"sampler" mapstructure:"sampler"` - - // The span kinds to match. If the span's kind matches any of these, it matches. - // Values include: - // * client: client, a client span. - // * consumer: consumer, a consumer span. - // * internal: internal, an internal span. - // * producer: producer, a producer span. - // * server: server, a server span. - // If omitted, ignore. - // - SpanKinds []SpanKind `json:"span_kinds,omitempty" yaml:"span_kinds,omitempty" mapstructure:"span_kinds,omitempty"` -} - -type ExperimentalComposableRuleBasedSamplerRuleAttributePatterns struct { - // Configure list of value patterns to exclude. Applies after .included (i.e. - // excluded has higher priority than included). - // Values are evaluated to match as follows: - // * If the value exactly matches. - // * If the value matches the wildcard pattern, where '?' matches any single - // character and '*' matches any number of characters including none. - // If omitted, .included attributes are included. - // - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Configure list of value patterns to include. - // Values are evaluated to match as follows: - // * If the value exactly matches. - // * If the value matches the wildcard pattern, where '?' matches any single - // character and '*' matches any number of characters including none. - // If omitted, all values are included. - // - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` - - // The attribute key to match against. - // Property is required and must be non-null. - // - Key string `json:"key" yaml:"key" mapstructure:"key"` -} - -type ExperimentalComposableRuleBasedSamplerRuleAttributeValues struct { - // The attribute key to match against. - // Property is required and must be non-null. 
- // - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // The attribute values to match against. If the attribute's value matches any of - // these, it matches. - // Property is required and must be non-null. - // - Values []string `json:"values" yaml:"values" mapstructure:"values"` -} - -// The rules for the sampler, matched in order. If no rules match, the span is not -// sampled. -// If omitted or null, no span is sampled. -type ExperimentalComposableRuleBasedSamplerRules []ExperimentalComposableRuleBasedSamplerRule - -type ExperimentalComposableSampler struct { - // Configure sampler to be always_off. - // If omitted, ignore. - // - AlwaysOff ExperimentalComposableAlwaysOffSampler `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` - - // Configure sampler to be always_on. - // If omitted, ignore. - // - AlwaysOn ExperimentalComposableAlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` - - // Configure sampler to be parent_threshold. - // If omitted, ignore. - // - ParentThreshold *ExperimentalComposableParentThresholdSampler `json:"parent_threshold,omitempty" yaml:"parent_threshold,omitempty" mapstructure:"parent_threshold,omitempty"` - - // Configure sampler to be probability. - // If omitted, ignore. - // - Probability *ExperimentalComposableProbabilitySampler `json:"probability,omitempty" yaml:"probability,omitempty" mapstructure:"probability,omitempty"` - - // Configure sampler to be rule_based. - // If omitted, ignore. - // - RuleBased *ExperimentalComposableRuleBasedSampler `json:"rule_based,omitempty" yaml:"rule_based,omitempty" mapstructure:"rule_based,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type ExperimentalContainerResourceDetector map[string]interface{} - -type ExperimentalGeneralInstrumentation struct { - // Configure instrumentations following the http semantic conventions. 
- // See http semantic conventions: - // https://opentelemetry.io/docs/specs/semconv/http/ - // If omitted, defaults as described in ExperimentalHttpInstrumentation are used. - // - Http *ExperimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` - - // Configure instrumentations following the peer semantic conventions. - // See peer semantic conventions: - // https://opentelemetry.io/docs/specs/semconv/attributes-registry/peer/ - // If omitted, defaults as described in ExperimentalPeerInstrumentation are used. - // - Peer *ExperimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` -} - -type ExperimentalHostResourceDetector map[string]interface{} - -type ExperimentalHttpClientInstrumentation struct { - // Configure headers to capture for outbound http requests. - // If omitted, no outbound request headers are captured. - // - RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` - - // Configure headers to capture for inbound http responses. - // If omitted, no inbound response headers are captured. - // - ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` -} - -type ExperimentalHttpInstrumentation struct { - // Configure instrumentations following the http client semantic conventions. - // If omitted, defaults as described in ExperimentalHttpClientInstrumentation are - // used. - // - Client *ExperimentalHttpClientInstrumentation `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` - - // Configure instrumentations following the http server semantic conventions. - // If omitted, defaults as described in ExperimentalHttpServerInstrumentation are - // used. 
- // - Server *ExperimentalHttpServerInstrumentation `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` -} - -type ExperimentalHttpServerInstrumentation struct { - // Configure headers to capture for inbound http requests. - // If omitted, no request headers are captured. - // - RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` - - // Configure headers to capture for outbound http responses. - // If omitted, no response headers are captures. - // - ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` -} - -type ExperimentalInstrumentation struct { - // Configure C++ language-specific instrumentation libraries. - // If omitted, instrumentation defaults are used. - // - Cpp ExperimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` - - // Configure .NET language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Dotnet ExperimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` - - // Configure Erlang language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Erlang ExperimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` - - // Configure general SemConv options that may apply to multiple languages and - // instrumentations. 
- // Instrumenation may merge general config options with the language specific - // configuration at .instrumentation.. - // If omitted, default values as described in ExperimentalGeneralInstrumentation - // are used. - // - General *ExperimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` - - // Configure Go language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Go ExperimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` - - // Configure Java language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Java ExperimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` - - // Configure JavaScript language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Js ExperimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` - - // Configure PHP language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Php ExperimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` - - // Configure Python language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. 
- // If omitted, instrumentation defaults are used. - // - Python ExperimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` - - // Configure Ruby language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Ruby ExperimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` - - // Configure Rust language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Rust ExperimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` - - // Configure Swift language-specific instrumentation libraries. - // Each entry's key identifies a particular instrumentation library. The - // corresponding value configures it. - // If omitted, instrumentation defaults are used. - // - Swift ExperimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` -} - -type ExperimentalJaegerRemoteSampler struct { - // Configure the endpoint of the jaeger remote sampling service. - // Property is required and must be non-null. - // - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Configure the initial sampler used before first configuration is fetched. - // Property is required and must be non-null. - // - InitialSampler Sampler `json:"initial_sampler" yaml:"initial_sampler" mapstructure:"initial_sampler"` - - // Configure the polling interval (in milliseconds) to fetch from the remote - // sampling service. - // If omitted or null, 60000 is used. 
- // - Interval ExperimentalJaegerRemoteSamplerInterval `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` -} - -// Configure the polling interval (in milliseconds) to fetch from the remote -// sampling service. -// If omitted or null, 60000 is used. -type ExperimentalJaegerRemoteSamplerInterval *int - -type ExperimentalLanguageSpecificInstrumentation map[string]map[string]interface{} - -type ExperimentalLoggerConfig struct { - // Configure if the logger is enabled or not. - // If omitted or null, false is used. - // - Disabled ExperimentalLoggerConfigDisabled `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` - - // Configure severity filtering. - // Log records with an non-zero (i.e. unspecified) severity number which is less - // than minimum_severity are not processed. - // Values include: - // * debug: debug, severity number 5. - // * debug2: debug2, severity number 6. - // * debug3: debug3, severity number 7. - // * debug4: debug4, severity number 8. - // * error: error, severity number 17. - // * error2: error2, severity number 18. - // * error3: error3, severity number 19. - // * error4: error4, severity number 20. - // * fatal: fatal, severity number 21. - // * fatal2: fatal2, severity number 22. - // * fatal3: fatal3, severity number 23. - // * fatal4: fatal4, severity number 24. - // * info: info, severity number 9. - // * info2: info2, severity number 10. - // * info3: info3, severity number 11. - // * info4: info4, severity number 12. - // * trace: trace, severity number 1. - // * trace2: trace2, severity number 2. - // * trace3: trace3, severity number 3. - // * trace4: trace4, severity number 4. - // * warn: warn, severity number 13. - // * warn2: warn2, severity number 14. - // * warn3: warn3, severity number 15. - // * warn4: warn4, severity number 16. - // If omitted, severity filtering is not applied. 
- // - MinimumSeverity *SeverityNumber `json:"minimum_severity,omitempty" yaml:"minimum_severity,omitempty" mapstructure:"minimum_severity,omitempty"` - - // Configure trace based filtering. - // If true, log records associated with unsampled trace contexts traces are not - // processed. If false, or if a log record is not associated with a trace context, - // trace based filtering is not applied. - // If omitted or null, trace based filtering is not applied. - // - TraceBased ExperimentalLoggerConfigTraceBased `json:"trace_based,omitempty" yaml:"trace_based,omitempty" mapstructure:"trace_based,omitempty"` -} - -// Configure if the logger is enabled or not. -// If omitted or null, false is used. -type ExperimentalLoggerConfigDisabled *bool - -// Configure trace based filtering. -// If true, log records associated with unsampled trace contexts traces are not -// processed. If false, or if a log record is not associated with a trace context, -// trace based filtering is not applied. -// If omitted or null, trace based filtering is not applied. -type ExperimentalLoggerConfigTraceBased *bool - -type ExperimentalLoggerConfigurator struct { - // Configure the default logger config used there is no matching entry in - // .logger_configurator/development.loggers. - // If omitted, unmatched .loggers use default values as described in - // ExperimentalLoggerConfig. - // - DefaultConfig *ExperimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` - - // Configure loggers. - // If omitted, all loggers use .default_config. - // - Loggers []ExperimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"` -} - -type ExperimentalLoggerMatcherAndConfig struct { - // The logger config. - // Property is required and must be non-null. 
- // - Config ExperimentalLoggerConfig `json:"config" yaml:"config" mapstructure:"config"` - - // Configure logger names to match, evaluated as follows: - // - // * If the logger name exactly matches. - // * If the logger name matches the wildcard pattern, where '?' matches any - // single character and '*' matches any number of characters including none. - // Property is required and must be non-null. - // - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type ExperimentalMeterConfig struct { - // Configure if the meter is enabled or not. - // If omitted, false is used. - // - Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` -} - -type ExperimentalMeterConfigurator struct { - // Configure the default meter config used there is no matching entry in - // .meter_configurator/development.meters. - // If omitted, unmatched .meters use default values as described in - // ExperimentalMeterConfig. - // - DefaultConfig *ExperimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` - - // Configure meters. - // If omitted, all meters used .default_config. - // - Meters []ExperimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"` -} - -type ExperimentalMeterMatcherAndConfig struct { - // The meter config. - // Property is required and must be non-null. - // - Config ExperimentalMeterConfig `json:"config" yaml:"config" mapstructure:"config"` - - // Configure meter names to match, evaluated as follows: - // - // * If the meter name exactly matches. - // * If the meter name matches the wildcard pattern, where '?' matches any single - // character and '*' matches any number of characters including none. - // Property is required and must be non-null. 
- // - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type ExperimentalOTLPFileExporter struct { - // Configure output stream. - // Values include stdout, or scheme+destination. For example: - // file:///path/to/file.jsonl. - // If omitted or null, stdout is used. - // - OutputStream ExperimentalOTLPFileExporterOutputStream `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"` -} - -// Configure output stream. -// Values include stdout, or scheme+destination. For example: -// file:///path/to/file.jsonl. -// If omitted or null, stdout is used. -type ExperimentalOTLPFileExporterOutputStream *string - -type ExperimentalOTLPFileMetricExporter struct { - // Configure default histogram aggregation. - // Values include: - // * base2_exponential_bucket_histogram: Use base2 exponential histogram as the - // default aggregation for histogram instruments. - // * explicit_bucket_histogram: Use explicit bucket histogram as the default - // aggregation for histogram instruments. - // If omitted, explicit_bucket_histogram is used. - // - DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` - - // Configure output stream. - // Values include stdout, or scheme+destination. For example: - // file:///path/to/file.jsonl. - // If omitted or null, stdout is used. - // - OutputStream ExperimentalOTLPFileMetricExporterOutputStream `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"` - - // Configure temporality preference. - // Values include: - // * cumulative: Use cumulative aggregation temporality for all instrument types. - // * delta: Use delta aggregation for all instrument types except up down counter - // and asynchronous up down counter. 
- // * low_memory: Use delta aggregation temporality for counter and histogram - // instrument types. Use cumulative aggregation temporality for all other - // instrument types. - // If omitted, cumulative is used. - // - TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` -} - -// Configure output stream. -// Values include stdout, or scheme+destination. For example: -// file:///path/to/file.jsonl. -// If omitted or null, stdout is used. -type ExperimentalOTLPFileMetricExporterOutputStream *string - -type ExperimentalPeerInstrumentation struct { - // Configure the service mapping for instrumentations following peer.service - // semantic conventions. - // See peer.service semantic conventions: - // https://opentelemetry.io/docs/specs/semconv/general/attributes/#general-remote-service-attributes - // If omitted, no peer service mappings are used. - // - ServiceMapping []ExperimentalPeerServiceMapping `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` -} - -type ExperimentalPeerServiceMapping struct { - // The IP address to map. - // Property is required and must be non-null. - // - Peer string `json:"peer" yaml:"peer" mapstructure:"peer"` - - // The logical name corresponding to the IP address of .peer. - // Property is required and must be non-null. - // - Service string `json:"service" yaml:"service" mapstructure:"service"` -} - -type ExperimentalProbabilitySampler struct { - // Configure ratio. - // If omitted or null, 1.0 is used. - // - Ratio ExperimentalProbabilitySamplerRatio `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` -} - -// Configure ratio. -// If omitted or null, 1.0 is used. 
-type ExperimentalProbabilitySamplerRatio *float64 - -type ExperimentalProcessResourceDetector map[string]interface{} - -type ExperimentalPrometheusMetricExporter struct { - // Configure host. - // If omitted or null, localhost is used. - // - Host ExperimentalPrometheusMetricExporterHost `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // Configure port. - // If omitted or null, 9464 is used. - // - Port ExperimentalPrometheusMetricExporterPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // Configure how metric names are translated to Prometheus metric names. - // Values include: - // * no_translation: Special character escaping is disabled. Type and unit - // suffixes are disabled. Metric names are unaltered. - // * no_utf8_escaping_with_suffixes: Special character escaping is disabled. Type - // and unit suffixes are enabled. - // * underscore_escaping_with_suffixes: Special character escaping is enabled. - // Type and unit suffixes are enabled. - // * underscore_escaping_without_suffixes: Special character escaping is enabled. - // Type and unit suffixes are disabled. This represents classic Prometheus metric - // name compatibility. - // If omitted, underscore_escaping_with_suffixes is used. - // - TranslationStrategy *ExperimentalPrometheusTranslationStrategy `json:"translation_strategy,omitempty" yaml:"translation_strategy,omitempty" mapstructure:"translation_strategy,omitempty"` - - // Configure Prometheus Exporter to add resource attributes as metrics attributes, - // where the resource attribute keys match the patterns. - // If omitted, no resource attributes are added. - // - WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` - - // Configure Prometheus Exporter to produce metrics without a scope info metric. - // If omitted or null, false is used. 
- // - WithoutScopeInfo ExperimentalPrometheusMetricExporterWithoutScopeInfo `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` - - // Configure Prometheus Exporter to produce metrics without a target info metric - // for the resource. - // If omitted or null, false is used. - // - WithoutTargetInfo ExperimentalPrometheusMetricExporterWithoutTargetInfo `json:"without_target_info,omitempty" yaml:"without_target_info,omitempty" mapstructure:"without_target_info,omitempty"` -} - -// Configure host. -// If omitted or null, localhost is used. -type ExperimentalPrometheusMetricExporterHost *string - -// Configure port. -// If omitted or null, 9464 is used. -type ExperimentalPrometheusMetricExporterPort *int - -// Configure Prometheus Exporter to produce metrics without a scope info metric. -// If omitted or null, false is used. -type ExperimentalPrometheusMetricExporterWithoutScopeInfo *bool - -// Configure Prometheus Exporter to produce metrics without a target info metric -// for the resource. -// If omitted or null, false is used. -type ExperimentalPrometheusMetricExporterWithoutTargetInfo *bool - -type ExperimentalPrometheusTranslationStrategy string - -const ExperimentalPrometheusTranslationStrategyNoTranslation ExperimentalPrometheusTranslationStrategy = "no_translation" -const ExperimentalPrometheusTranslationStrategyNoUtf8EscapingWithSuffixes ExperimentalPrometheusTranslationStrategy = "no_utf8_escaping_with_suffixes" -const ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes ExperimentalPrometheusTranslationStrategy = "underscore_escaping_with_suffixes" -const ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes ExperimentalPrometheusTranslationStrategy = "underscore_escaping_without_suffixes" - -type ExperimentalResourceDetection struct { - // Configure attributes provided by resource detectors. 
- // If omitted, all attributes from resource detectors are added. - // - Attributes *IncludeExclude `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` - - // Configure resource detectors. - // Resource detector names are dependent on the SDK language ecosystem. Please - // consult documentation for each respective language. - // If omitted, no resource detectors are enabled. - // - Detectors []ExperimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` -} - -type ExperimentalResourceDetector struct { - // Enable the container resource detector, which populates container.* attributes. - // If omitted, ignore. - // - Container ExperimentalContainerResourceDetector `json:"container,omitempty" yaml:"container,omitempty" mapstructure:"container,omitempty"` - - // Enable the host resource detector, which populates host.* and os.* attributes. - // If omitted, ignore. - // - Host ExperimentalHostResourceDetector `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // Enable the process resource detector, which populates process.* attributes. - // If omitted, ignore. - // - Process ExperimentalProcessResourceDetector `json:"process,omitempty" yaml:"process,omitempty" mapstructure:"process,omitempty"` - - // Enable the service detector, which populates service.name based on the - // OTEL_SERVICE_NAME environment variable and service.instance.id. - // If omitted, ignore. 
- // - Service ExperimentalServiceResourceDetector `json:"service,omitempty" yaml:"service,omitempty" mapstructure:"service,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type ExperimentalServiceResourceDetector map[string]interface{} - -type ExperimentalSpanParent string - -const ExperimentalSpanParentLocal ExperimentalSpanParent = "local" -const ExperimentalSpanParentNone ExperimentalSpanParent = "none" -const ExperimentalSpanParentRemote ExperimentalSpanParent = "remote" - -type ExperimentalTracerConfig struct { - // Configure if the tracer is enabled or not. - // If omitted, false is used. - // - Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` -} - -type ExperimentalTracerConfigurator struct { - // Configure the default tracer config used there is no matching entry in - // .tracer_configurator/development.tracers. - // If omitted, unmatched .tracers use default values as described in - // ExperimentalTracerConfig. - // - DefaultConfig *ExperimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` - - // Configure tracers. - // If omitted, all tracers use .default_config. - // - Tracers []ExperimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"` -} - -type ExperimentalTracerMatcherAndConfig struct { - // The tracer config. - // Property is required and must be non-null. - // - Config ExperimentalTracerConfig `json:"config" yaml:"config" mapstructure:"config"` - - // Configure tracer names to match, evaluated as follows: - // - // * If the tracer name exactly matches. - // * If the tracer name matches the wildcard pattern, where '?' matches any - // single character and '*' matches any number of characters including none. - // Property is required and must be non-null. 
- // - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - type ExplicitBucketHistogramAggregation struct { // Configure bucket boundaries. // If omitted, [0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, @@ -1209,11 +525,6 @@ type LogRecordExporter struct { // Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // Configure exporter to be OTLP with file transport. - // If omitted, ignore. - // - OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` - // Configure exporter to be OTLP with gRPC transport. // If omitted, ignore. // @@ -1275,12 +586,6 @@ type LoggerProvider struct { // Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - // Configure loggers. - // If omitted, all loggers use default values as described in - // ExperimentalLoggerConfig. - // - LoggerConfiguratorDevelopment *ExperimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"` - // Configure log record processors. // Property is required and must be non-null. // @@ -1300,12 +605,6 @@ type MeterProvider struct { // ExemplarFilter *ExemplarFilter `json:"exemplar_filter,omitempty" yaml:"exemplar_filter,omitempty" mapstructure:"exemplar_filter,omitempty"` - // Configure meters. - // If omitted, all meters use default values as described in - // ExperimentalMeterConfig. - // - MeterConfiguratorDevelopment *ExperimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"` - // Configure metric readers. // Property is required and must be non-null. 
// @@ -1721,11 +1020,6 @@ type OpenTelemetryConfiguration struct { // FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` - // Configure instrumentation. - // If omitted, instrumentation defaults are used. - // - InstrumentationDevelopment *ExperimentalInstrumentation `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"` - // Configure the log level of the internal logger used by the SDK. // Values include: // * debug: debug, severity number 5. @@ -1894,11 +1188,6 @@ type Propagator struct { type PropagatorCompositeList *string type PullMetricExporter struct { - // Configure exporter to be prometheus. - // If omitted, ignore. - // - PrometheusDevelopment *ExperimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"` - AdditionalProperties interface{} `mapstructure:",remain"` } @@ -1925,11 +1214,6 @@ type PushMetricExporter struct { // Console *ConsoleMetricExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // Configure exporter to be OTLP with file transport. - // If omitted, ignore. - // - OTLPFileDevelopment *ExperimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` - // Configure exporter to be OTLP with gRPC transport. // If omitted, ignore. // @@ -1960,11 +1244,6 @@ type Resource struct { // AttributesList ResourceAttributesList `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"` - // Configure resource detection. - // If omitted, resource detection is disabled. 
- // - DetectionDevelopment *ExperimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"` - // Configure resource schema URL. // If omitted or null, no schema URL is used. // @@ -1995,26 +1274,11 @@ type Sampler struct { // AlwaysOn AlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` - // Configure sampler to be composite. - // If omitted, ignore. - // - CompositeDevelopment *ExperimentalComposableSampler `json:"composite/development,omitempty" yaml:"composite/development,omitempty" mapstructure:"composite/development,omitempty"` - - // Configure sampler to be jaeger_remote. - // If omitted, ignore. - // - JaegerRemoteDevelopment *ExperimentalJaegerRemoteSampler `json:"jaeger_remote/development,omitempty" yaml:"jaeger_remote/development,omitempty" mapstructure:"jaeger_remote/development,omitempty"` - // Configure sampler to be parent_based. // If omitted, ignore. // ParentBased *ParentBasedSampler `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` - // Configure sampler to be probability. - // If omitted, ignore. - // - ProbabilityDevelopment *ExperimentalProbabilitySampler `json:"probability/development,omitempty" yaml:"probability/development,omitempty" mapstructure:"probability/development,omitempty"` - // Configure sampler to be trace_id_ratio_based. // If omitted, ignore. // @@ -2070,11 +1334,6 @@ type SpanExporter struct { // Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - // Configure exporter to be OTLP with file transport. - // If omitted, ignore. - // - OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` - // Configure exporter to be OTLP with gRPC transport. 
// If omitted, ignore. // @@ -2246,12 +1505,6 @@ type TracerProvider struct { // If omitted, parent based sampler with a root of always_on is used. // Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` - - // Configure tracers. - // If omitted, all tracers use default values as described in - // ExperimentalTracerConfig. - // - TracerConfiguratorDevelopment *ExperimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"` } type View struct { diff --git a/otelconf/log.go b/otelconf/log.go index 24400a14194..f30d72cc254 100644 --- a/otelconf/log.go +++ b/otelconf/log.go @@ -92,10 +92,6 @@ func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Export return otlpGRPCLogExporter(ctx, exporter.OTLPGrpc) } } - if exporter.OTLPFileDevelopment != nil { - // TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408 - return nil, newErrInvalid("otlp_file/development") - } if exportersConfigured > 1 { return nil, newErrInvalid("must not specify multiple exporters") diff --git a/otelconf/log_test.go b/otelconf/log_test.go index e7dcd39bac5..64432ed0333 100644 --- a/otelconf/log_test.go +++ b/otelconf/log_test.go @@ -633,9 +633,7 @@ func TestLogProcessor(t *testing.T) { name: "simple/otlp_file", processor: LogRecordProcessor{ Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{}, - }, + Exporter: LogRecordExporter{}, }, }, wantErrT: newErrInvalid("otlp_file/development"), diff --git a/otelconf/metric.go b/otelconf/metric.go index b02c519385f..edeb2d7378c 100644 --- a/otelconf/metric.go +++ b/otelconf/metric.go @@ -9,21 +9,14 @@ import ( "errors" "fmt" "math" - "net" "net/http" "net/url" "os" - "strconv" "time" - "github.com/prometheus/client_golang/prometheus" - 
"github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/otlptranslator" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" @@ -96,9 +89,6 @@ func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) } func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { - if exporter.PrometheusDevelopment != nil { - return prometheusReader(ctx, exporter.PrometheusDevelopment) - } return nil, newErrInvalid("no valid metric exporter") } @@ -141,10 +131,6 @@ func periodicExporter(ctx context.Context, exporter PushMetricExporter, opts ... return sdkmetric.NewPeriodicReader(exp, opts...), nil } } - if exporter.OTLPFileDevelopment != nil { - // TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408 - return nil, newErrInvalid("otlp_file/development") - } if exportersConfigured > 1 { return nil, newErrInvalid("must not specify multiple exporters") @@ -342,94 +328,6 @@ func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { }, nil } -func prometheusReader(ctx context.Context, prometheusConfig *ExperimentalPrometheusMetricExporter) (sdkmetric.Reader, error) { - if prometheusConfig.Host == nil { - return nil, newErrInvalid("host must be specified") - } - if prometheusConfig.Port == nil { - return nil, newErrInvalid("port must be specified") - } - - opts, err := prometheusReaderOpts(prometheusConfig) - if err != nil { - return nil, err - } - - reg := prometheus.NewRegistry() - opts = append(opts, otelprom.WithRegisterer(reg)) - - reader, err := otelprom.New(opts...) 
- if err != nil { - return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) - server := http.Server{ - // Timeouts are necessary to make a server resilient to attacks. - // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts - ReadTimeout: 5 * time.Second, - WriteTimeout: 10 * time.Second, - IdleTimeout: 120 * time.Second, - Handler: mux, - } - - // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". - host := *prometheusConfig.Host - if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - - addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) - lis, err := net.Listen("tcp", addr) - if err != nil { - return nil, errors.Join( - fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), - reader.Shutdown(ctx), - ) - } - - // Only for testing reasons, add the address to the http Server, will not be used. 
- server.Addr = lis.Addr().String() - - go func() { - if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) { - otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) - } - }() - - return readerWithServer{reader, &server}, nil -} - -func validTranslationStrategy(strategy ExperimentalPrometheusTranslationStrategy) bool { - return strategy == ExperimentalPrometheusTranslationStrategyNoTranslation || - strategy == ExperimentalPrometheusTranslationStrategyNoUtf8EscapingWithSuffixes || - strategy == ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes || - strategy == ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes -} - -func prometheusReaderOpts(prometheusConfig *ExperimentalPrometheusMetricExporter) ([]otelprom.Option, error) { - var opts []otelprom.Option - if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { - opts = append(opts, otelprom.WithoutScopeInfo()) - } - if prometheusConfig.TranslationStrategy != nil { - if !validTranslationStrategy(*prometheusConfig.TranslationStrategy) { - return nil, newErrInvalid("translation strategy invalid") - } - opts = append(opts, otelprom.WithTranslationStrategy(otlptranslator.TranslationStrategyOption(*prometheusConfig.TranslationStrategy))) - } - if prometheusConfig.WithResourceConstantLabels != nil { - f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) - if err != nil { - return nil, err - } - opts = append(opts, otelprom.WithResourceAsConstantLabels(f)) - } - - return opts, nil -} - type readerWithServer struct { sdkmetric.Reader server *http.Server diff --git a/otelconf/metric_test.go b/otelconf/metric_test.go index d1ec9ccd0be..aed5aaa0cf1 100644 --- a/otelconf/metric_test.go +++ b/otelconf/metric_test.go @@ -17,7 +17,6 @@ import ( "path/filepath" "reflect" "runtime" - "strings" "testing" "time" @@ -26,7 +25,6 @@ import ( "go.opentelemetry.io/otel/attribute" 
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" @@ -163,8 +161,6 @@ func TestReader(t *testing.T) { require.NoError(t, err) otlpHTTPExporter, err := otlpmetrichttp.New(ctx) require.NoError(t, err) - promExporter, err := otelprom.New() - require.NoError(t, err) testCases := []struct { name string reader MetricReader @@ -183,70 +179,6 @@ func TestReader(t *testing.T) { }, wantErrT: newErrInvalid("no valid metric exporter"), }, - { - name: "pull/prometheus-no-host", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{}, - }, - }, - }, - wantErrT: newErrInvalid("host must be specified"), - }, - { - name: "pull/prometheus-no-port", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ - Host: ptr("localhost"), - }, - }, - }, - }, - wantErrT: newErrInvalid("port must be specified"), - }, - { - name: "pull/prometheus", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ - Host: ptr("localhost"), - Port: ptr(0), - WithoutScopeInfo: ptr(true), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes), - WithResourceConstantLabels: &IncludeExclude{ - Included: []string{"include"}, - Excluded: []string{"exclude"}, - }, - }, - }, - }, - }, - wantReader: readerWithServer{promExporter, nil}, - }, - { - name: "pull/prometheus/invalid strategy", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - PrometheusDevelopment: 
&ExperimentalPrometheusMetricExporter{ - Host: ptr("localhost"), - Port: ptr(0), - WithoutScopeInfo: ptr(true), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategy("invalid-strategy")), - WithResourceConstantLabels: &IncludeExclude{ - Included: []string{"include"}, - Excluded: []string{"exclude"}, - }, - }, - }, - }, - }, - wantErrT: newErrInvalid("translation strategy invalid"), - }, { name: "periodic/otlp-grpc-exporter", reader: MetricReader{ @@ -846,17 +778,6 @@ func TestReader(t *testing.T) { sdkmetric.WithTimeout(5_000*time.Millisecond), ), }, - { - name: "periodic/otlp_file", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{}, - }, - }, - }, - wantErrT: newErrInvalid("otlp_file/development"), - }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { @@ -1354,90 +1275,6 @@ func TestNewIncludeExcludeFilterError(t *testing.T) { require.Equal(t, fmt.Errorf("attribute cannot be in both include and exclude list: foo"), err) } -func TestPrometheusReaderOpts(t *testing.T) { - testCases := []struct { - name string - cfg ExperimentalPrometheusMetricExporter - wantOptions int - }{ - { - name: "no options", - cfg: ExperimentalPrometheusMetricExporter{}, - wantOptions: 0, - }, - { - name: "all set", - cfg: ExperimentalPrometheusMetricExporter{ - WithoutScopeInfo: ptr(true), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes), - WithResourceConstantLabels: &IncludeExclude{}, - }, - wantOptions: 3, - }, - { - name: "all set false", - cfg: ExperimentalPrometheusMetricExporter{ - WithoutScopeInfo: ptr(false), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), - WithResourceConstantLabels: &IncludeExclude{}, - }, - wantOptions: 2, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - opts, err := 
prometheusReaderOpts(&tt.cfg) - require.NoError(t, err) - require.Len(t, opts, tt.wantOptions) - }) - } -} - -func TestPrometheusIPv6(t *testing.T) { - tests := []struct { - name string - host string - }{ - { - name: "IPv6", - host: "::1", - }, - { - name: "[IPv6]", - host: "[::1]", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - port := 0 - cfg := ExperimentalPrometheusMetricExporter{ - Host: &tt.host, - Port: &port, - WithoutScopeInfo: ptr(true), - TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), - WithResourceConstantLabels: &IncludeExclude{}, - } - - rs, err := prometheusReader(t.Context(), &cfg) - t.Cleanup(func() { - //nolint:usetesting // required to avoid getting a canceled context at cleanup. - require.NoError(t, rs.Shutdown(context.Background())) - }) - require.NoError(t, err) - - hServ := rs.(readerWithServer).server - assert.True(t, strings.HasPrefix(hServ.Addr, "[::1]:")) - - resp, err := http.DefaultClient.Get("http://" + hServ.Addr + "/metrics") - t.Cleanup(func() { - require.NoError(t, resp.Body.Close()) - }) - require.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - } -} - func Test_otlpGRPCMetricExporter(t *testing.T) { if runtime.GOOS == "windows" { // TODO (#7446): Fix the flakiness on Windows. 
diff --git a/otelconf/resource.go b/otelconf/resource.go index cab0aeb9aaf..90bbb0331d6 100644 --- a/otelconf/resource.go +++ b/otelconf/resource.go @@ -12,24 +12,6 @@ import ( "go.opentelemetry.io/contrib/otelconf/internal/kv" ) -func resourceOpts(detectors []ExperimentalResourceDetector) []resource.Option { - opts := []resource.Option{} - for _, d := range detectors { - if d.Container != nil { - opts = append(opts, resource.WithContainer()) - } - if d.Host != nil { - opts = append(opts, resource.WithHost(), resource.WithHostID()) - } - if d.Process != nil { - opts = append(opts, resource.WithProcess()) - } - // TODO: implement service: - // Waiting on https://github.com/open-telemetry/opentelemetry-go/pull/7642 - } - return opts -} - func newResource(r *Resource) (*resource.Resource, error) { if r == nil { return resource.Default(), nil @@ -49,9 +31,5 @@ func newResource(r *Resource) (*resource.Resource, error) { resource.WithSchemaURL(schema), } - if r.DetectionDevelopment != nil { - opts = append(opts, resourceOpts(r.DetectionDevelopment.Detectors)...) - } - return resource.New(context.Background(), opts...) 
} diff --git a/otelconf/trace.go b/otelconf/trace.go index f0de96bd192..889256d7078 100644 --- a/otelconf/trace.go +++ b/otelconf/trace.go @@ -151,10 +151,6 @@ func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExpo return otlpGRPCSpanExporter(ctx, exporter.OTLPGrpc) } } - if exporter.OTLPFileDevelopment != nil { - // TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408 - return nil, newErrInvalid("otlp_file/development") - } if exportersConfigured > 1 { return nil, newErrInvalid("must not specify multiple exporters") diff --git a/otelconf/trace_test.go b/otelconf/trace_test.go index c7dc2270a44..0ab416c9157 100644 --- a/otelconf/trace_test.go +++ b/otelconf/trace_test.go @@ -734,9 +734,7 @@ func TestSpanProcessor(t *testing.T) { name: "simple/otlp_file", processor: SpanProcessor{ Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - OTLPFileDevelopment: &ExperimentalOTLPFileExporter{}, - }, + Exporter: SpanExporter{}, }, }, wantErrT: newErrInvalid("otlp_file/development"), diff --git a/otelconf/x/config.go b/otelconf/x/config.go new file mode 100644 index 00000000000..18ab2aaa290 --- /dev/null +++ b/otelconf/x/config.go @@ -0,0 +1,215 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconf provides an OpenTelemetry declarative configuration SDK. 
+package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "context" + "errors" + "os" + + "go.opentelemetry.io/otel/log" + nooplog "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/propagation" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + nooptrace "go.opentelemetry.io/otel/trace/noop" + yaml "go.yaml.in/yaml/v3" + + "go.opentelemetry.io/contrib/otelconf/internal/provider" +) + +const envVarConfigFile = "OTEL_EXPERIMENTAL_CONFIG_FILE" + +// SDK is a struct that contains all the providers +// configured via the configuration model. +type SDK struct { + meterProvider metric.MeterProvider + tracerProvider trace.TracerProvider + loggerProvider log.LoggerProvider + propagator propagation.TextMapPropagator + shutdown shutdownFunc +} + +// TracerProvider returns a configured trace.TracerProvider. +func (s *SDK) TracerProvider() trace.TracerProvider { + return s.tracerProvider +} + +// MeterProvider returns a configured metric.MeterProvider. +func (s *SDK) MeterProvider() metric.MeterProvider { + return s.meterProvider +} + +// LoggerProvider returns a configured log.LoggerProvider. +func (s *SDK) LoggerProvider() log.LoggerProvider { + return s.loggerProvider +} + +// Propagator returns a configured propagation.TextMapPropagator. +func (s *SDK) Propagator() propagation.TextMapPropagator { + return s.propagator +} + +// Shutdown calls shutdown on all configured providers. 
+func (s *SDK) Shutdown(ctx context.Context) error { + return s.shutdown(ctx) +} + +var noopSDK = SDK{ + loggerProvider: nooplog.LoggerProvider{}, + meterProvider: noopmetric.MeterProvider{}, + tracerProvider: nooptrace.TracerProvider{}, + propagator: propagation.NewCompositeTextMapPropagator(), + shutdown: func(context.Context) error { return nil }, +} + +func parseConfigFileFromEnvironment(filename string) (ConfigurationOption, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + // Parse a configuration file into an OpenTelemetryConfiguration model. + c, err := ParseYAML(b) + if err != nil { + return nil, err + } + + // Create SDK components with the parsed configuration. + return WithOpenTelemetryConfiguration(*c), nil +} + +// NewSDK creates SDK providers based on the configuration model. It checks the local environment and +// uses the file set in the variable `OTEL_EXPERIMENTAL_CONFIG_FILE` to configure the SDK automatically. +// Any file defined by `OTEL_EXPERIMENTAL_CONFIG_FILE` will supersede all files passed with +// [WithOpenTelemetryConfiguration]. 
+func NewSDK(opts ...ConfigurationOption) (SDK, error) { + filename, ok := os.LookupEnv(envVarConfigFile) + if ok { + opt, err := parseConfigFileFromEnvironment(filename) + if err != nil { + return noopSDK, err + } + opts = append(opts, opt) + } + o := configOptions{ + ctx: context.Background(), + } + for _, opt := range opts { + o = opt.apply(o) + } + if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { + return noopSDK, nil + } + + r, err := newResource(o.opentelemetryConfig.Resource) + if err != nil { + return noopSDK, err + } + + p, err := newPropagator(o.opentelemetryConfig.Propagator) + if err != nil { + return noopSDK, err + } + + mp, mpShutdown, err := meterProvider(o, r) + if err != nil { + return noopSDK, err + } + + tp, tpShutdown, err := tracerProvider(o, r) + if err != nil { + return noopSDK, err + } + + lp, lpShutdown, err := loggerProvider(o, r) + if err != nil { + return noopSDK, err + } + + return SDK{ + meterProvider: mp, + tracerProvider: tp, + loggerProvider: lp, + propagator: p, + shutdown: func(ctx context.Context) error { + return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) + }, + }, nil +} + +// ConfigurationOption configures options for providers. +type ConfigurationOption interface { + apply(configOptions) configOptions +} + +type configurationOptionFunc func(configOptions) configOptions + +func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { + return fn(cfg) +} + +// WithContext sets the context.Context for the SDK. +func WithContext(ctx context.Context) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.ctx = ctx + return c + }) +} + +// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used +// to produce the SDK. 
+func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.opentelemetryConfig = cfg + return c + }) +} + +// WithLoggerProviderOptions appends LoggerProviderOptions used for constructing +// the LoggerProvider. OpenTelemetryConfiguration takes precedence over these options. +func WithLoggerProviderOptions(opts ...sdklog.LoggerProviderOption) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.loggerProviderOptions = append(c.loggerProviderOptions, opts...) + return c + }) +} + +// WithMeterProviderOptions appends metric.Options used for constructing the +// MeterProvider. OpenTelemetryConfiguration takes precedence over these options. +func WithMeterProviderOptions(opts ...sdkmetric.Option) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.meterProviderOptions = append(c.meterProviderOptions, opts...) + return c + }) +} + +// WithTracerProviderOptions appends TracerProviderOptions used for constructing +// the TracerProvider. OpenTelemetryConfiguration takes precedence over these options. +func WithTracerProviderOptions(opts ...sdktrace.TracerProviderOption) ConfigurationOption { + return configurationOptionFunc(func(c configOptions) configOptions { + c.tracerProviderOptions = append(c.tracerProviderOptions, opts...) + return c + }) +} + +// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. 
+func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { + file, err := provider.ReplaceEnvVars(file) + if err != nil { + return nil, err + } + var cfg OpenTelemetryConfiguration + err = yaml.Unmarshal(file, &cfg) + if err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/otelconf/x/config_common.go b/otelconf/x/config_common.go new file mode 100644 index 00000000000..91a40a8cd86 --- /dev/null +++ b/otelconf/x/config_common.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "context" + "errors" + "fmt" + "reflect" + + "go.opentelemetry.io/otel/baggage" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +const ( + compressionGzip = "gzip" + compressionNone = "none" +) + +var enumValuesAttributeType = []any{ + nil, + "string", + "bool", + "int", + "double", + "string_array", + "bool_array", + "int_array", + "double_array", +} + +var enumValuesViewSelectorInstrumentType = []any{ + "counter", + "gauge", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} + +var enumValuesOTLPMetricDefaultHistogramAggregation = []any{ + "explicit_bucket_histogram", + "base2_exponential_bucket_histogram", +} + +type configOptions struct { + ctx context.Context + opentelemetryConfig OpenTelemetryConfiguration + loggerProviderOptions []sdklog.LoggerProviderOption + meterProviderOptions []sdkmetric.Option + tracerProviderOptions []sdktrace.TracerProviderOption +} + +type shutdownFunc func(context.Context) error + +func noopShutdown(context.Context) error { + return nil +} + +type errBound struct { + Field string + Bound int + Op string +} + +func (e *errBound) Error() string { + return fmt.Sprintf("field %s: must be %s %d", e.Field, e.Op, e.Bound) +} + +func (e *errBound) 
Is(target error) bool { + t, ok := target.(*errBound) + if !ok { + return false + } + return e.Field == t.Field && e.Bound == t.Bound && e.Op == t.Op +} + +type errRequired struct { + Object any + Field string +} + +func (e *errRequired) Error() string { + return fmt.Sprintf("field %s in %s: required", e.Field, reflect.TypeOf(e.Object)) +} + +func (e *errRequired) Is(target error) bool { + t, ok := target.(*errRequired) + if !ok { + return false + } + return reflect.TypeOf(e.Object) == reflect.TypeOf(t.Object) && e.Field == t.Field +} + +type errUnmarshal struct { + Object any +} + +func (e *errUnmarshal) Error() string { + return fmt.Sprintf("unmarshal error in %T", e.Object) +} + +func (e *errUnmarshal) Is(target error) bool { + t, ok := target.(*errUnmarshal) + if !ok { + return false + } + return reflect.TypeOf(e.Object) == reflect.TypeOf(t.Object) +} + +// newErrGreaterOrEqualZero creates a new error indicating that the field must be greater than +// or equal to zero. +func newErrGreaterOrEqualZero(field string) error { + return &errBound{Field: field, Bound: 0, Op: ">="} +} + +// newErrGreaterThanZero creates a new error indicating that the field must be greater +// than zero. +func newErrGreaterThanZero(field string) error { + return &errBound{Field: field, Bound: 0, Op: ">"} +} + +// newErrRequired creates a new error indicating that the exporter field is required. +func newErrRequired(object any, field string) error { + return &errRequired{Object: object, Field: field} +} + +// newErrUnmarshal creates a new error indicating that an error occurred during unmarshaling. 
+func newErrUnmarshal(object any) error { + return &errUnmarshal{Object: object} +} + +type errInvalid struct { + Identifier string +} + +func (e *errInvalid) Error() string { + return "invalid config: " + e.Identifier +} + +func (e *errInvalid) Is(target error) bool { + t, ok := target.(*errInvalid) + if !ok { + return false + } + return reflect.TypeOf(e.Identifier) == reflect.TypeOf(t.Identifier) +} + +// newErrInvalid creates a new error indicating that an error occurred due to misconfiguration. +func newErrInvalid(id string) error { + return &errInvalid{Identifier: id} +} + +// unmarshalSamplerTypes handles always_on and always_off sampler unmarshaling. +func unmarshalSamplerTypes(raw map[string]any, plain *Sampler) { + // always_on can be nil, must check and set here + if _, ok := raw["always_on"]; ok { + plain.AlwaysOn = AlwaysOnSampler{} + } + // always_off can be nil, must check and set here + if _, ok := raw["always_off"]; ok { + plain.AlwaysOff = AlwaysOffSampler{} + } +} + +// unmarshalMetricProducer handles opencensus metric producer unmarshaling. +func unmarshalMetricProducer(raw map[string]any, plain *MetricProducer) { + // opencensus can be nil, must check and set here + if v, ok := raw["opencensus"]; ok && v == nil { + delete(raw, "opencensus") + plain.Opencensus = OpenCensusMetricProducer{} + } + if len(raw) > 0 { + plain.AdditionalProperties = raw + } +} + +// validatePeriodicMetricReader handles validation for PeriodicMetricReader. +func validatePeriodicMetricReader(plain *PeriodicMetricReader) error { + if plain.Timeout != nil && 0 > *plain.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + if plain.Interval != nil && 0 > *plain.Interval { + return newErrGreaterOrEqualZero("interval") + } + return nil +} + +// validateBatchLogRecordProcessor handles validation for BatchLogRecordProcessor. 
+func validateBatchLogRecordProcessor(plain *BatchLogRecordProcessor) error { + if plain.ExportTimeout != nil && 0 > *plain.ExportTimeout { + return newErrGreaterOrEqualZero("export_timeout") + } + if plain.MaxExportBatchSize != nil && 0 >= *plain.MaxExportBatchSize { + return newErrGreaterThanZero("max_export_batch_size") + } + if plain.MaxQueueSize != nil && 0 >= *plain.MaxQueueSize { + return newErrGreaterThanZero("max_queue_size") + } + if plain.ScheduleDelay != nil && 0 > *plain.ScheduleDelay { + return newErrGreaterOrEqualZero("schedule_delay") + } + return nil +} + +// validateBatchSpanProcessor handles validation for BatchSpanProcessor. +func validateBatchSpanProcessor(plain *BatchSpanProcessor) error { + if plain.ExportTimeout != nil && 0 > *plain.ExportTimeout { + return newErrGreaterOrEqualZero("export_timeout") + } + if plain.MaxExportBatchSize != nil && 0 >= *plain.MaxExportBatchSize { + return newErrGreaterThanZero("max_export_batch_size") + } + if plain.MaxQueueSize != nil && 0 >= *plain.MaxQueueSize { + return newErrGreaterThanZero("max_queue_size") + } + if plain.ScheduleDelay != nil && 0 > *plain.ScheduleDelay { + return newErrGreaterOrEqualZero("schedule_delay") + } + return nil +} + +// validateCardinalityLimits handles validation for CardinalityLimits. 
+func validateCardinalityLimits(plain *CardinalityLimits) error { + if plain.Counter != nil && 0 >= *plain.Counter { + return newErrGreaterThanZero("counter") + } + if plain.Default != nil && 0 >= *plain.Default { + return newErrGreaterThanZero("default") + } + if plain.Gauge != nil && 0 >= *plain.Gauge { + return newErrGreaterThanZero("gauge") + } + if plain.Histogram != nil && 0 >= *plain.Histogram { + return newErrGreaterThanZero("histogram") + } + if plain.ObservableCounter != nil && 0 >= *plain.ObservableCounter { + return newErrGreaterThanZero("observable_counter") + } + if plain.ObservableGauge != nil && 0 >= *plain.ObservableGauge { + return newErrGreaterThanZero("observable_gauge") + } + if plain.ObservableUpDownCounter != nil && 0 >= *plain.ObservableUpDownCounter { + return newErrGreaterThanZero("observable_up_down_counter") + } + if plain.UpDownCounter != nil && 0 >= *plain.UpDownCounter { + return newErrGreaterThanZero("up_down_counter") + } + return nil +} + +// validateSpanLimits handles validation for SpanLimits. 
+func validateSpanLimits(plain *SpanLimits) error { + if plain.AttributeCountLimit != nil && 0 > *plain.AttributeCountLimit { + return newErrGreaterOrEqualZero("attribute_count_limit") + } + if plain.AttributeValueLengthLimit != nil && 0 > *plain.AttributeValueLengthLimit { + return newErrGreaterOrEqualZero("attribute_value_length_limit") + } + if plain.EventAttributeCountLimit != nil && 0 > *plain.EventAttributeCountLimit { + return newErrGreaterOrEqualZero("event_attribute_count_limit") + } + if plain.EventCountLimit != nil && 0 > *plain.EventCountLimit { + return newErrGreaterOrEqualZero("event_count_limit") + } + if plain.LinkAttributeCountLimit != nil && 0 > *plain.LinkAttributeCountLimit { + return newErrGreaterOrEqualZero("link_attribute_count_limit") + } + if plain.LinkCountLimit != nil && 0 > *plain.LinkCountLimit { + return newErrGreaterOrEqualZero("link_count_limit") + } + return nil +} + +func ptr[T any](v T) *T { + return &v +} + +// createHeadersConfig combines the two header config fields. Headers take precedence over headersList. +func createHeadersConfig(headers []NameStringValuePair, headersList *string) (map[string]string, error) { + result := make(map[string]string) + if headersList != nil { + // Parsing follows https://github.com/open-telemetry/opentelemetry-configuration/blob/568e5080816d40d75792eb754fc96bde09654159/schema/type_descriptions.yaml#L584. + headerslist, err := baggage.Parse(*headersList) + if err != nil { + return nil, errors.Join(newErrInvalid("invalid headers_list"), err) + } + for _, kv := range headerslist.Members() { + result[kv.Key()] = kv.Value() + } + } + // Headers take precedence over HeadersList, so this has to be after HeadersList is processed. + for _, kv := range headers { + if kv.Value != nil { + result[kv.Name] = *kv.Value + } + } + return result, nil +} + +// supportedInstrumentType return an error if the instrument type is not supported. 
+func supportedInstrumentType(in InstrumentType) error { + for _, expected := range enumValuesViewSelectorInstrumentType { + if string(in) == fmt.Sprintf("%s", expected) { + return nil + } + } + return newErrInvalid(fmt.Sprintf("invalid selector (expected one of %#v): %#v", enumValuesViewSelectorInstrumentType, in)) +} + +// supportedHistogramAggregation return an error if the histogram aggregation is not supported. +func supportedHistogramAggregation(in ExporterDefaultHistogramAggregation) error { + for _, expected := range enumValuesOTLPMetricDefaultHistogramAggregation { + if string(in) == fmt.Sprintf("%s", expected) { + return nil + } + } + return newErrInvalid(fmt.Sprintf("invalid histogram aggregation (expected one of %#v): %#v", enumValuesOTLPMetricDefaultHistogramAggregation, in)) +} diff --git a/otelconf/x/config_json.go b/otelconf/x/config_json.go new file mode 100644 index 00000000000..308ffe83c39 --- /dev/null +++ b/otelconf/x/config_json.go @@ -0,0 +1,943 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ConsoleExporter) UnmarshalJSON(b []byte) error { + type plain ConsoleExporter + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = ConsoleExporter{} + } else { + *j = ConsoleExporter(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *B3Propagator) UnmarshalJSON(b []byte) error { + type plain B3Propagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. 
+ if p == nil { + *j = B3Propagator{} + } else { + *j = B3Propagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *B3MultiPropagator) UnmarshalJSON(b []byte) error { + type plain B3MultiPropagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = B3MultiPropagator{} + } else { + *j = B3MultiPropagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BaggagePropagator) UnmarshalJSON(b []byte) error { + type plain BaggagePropagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = BaggagePropagator{} + } else { + *j = BaggagePropagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *JaegerPropagator) UnmarshalJSON(b []byte) error { + type plain JaegerPropagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = JaegerPropagator{} + } else { + *j = JaegerPropagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OpenTracingPropagator) UnmarshalJSON(b []byte) error { + type plain OpenTracingPropagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = OpenTracingPropagator{} + } else { + *j = OpenTracingPropagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TraceContextPropagator) UnmarshalJSON(b []byte) error { + type plain TraceContextPropagator + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = TraceContextPropagator{} + } else { + *j = TraceContextPropagator(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ExperimentalContainerResourceDetector) UnmarshalJSON(b []byte) error { + type plain ExperimentalContainerResourceDetector + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = ExperimentalContainerResourceDetector{} + } else { + *j = ExperimentalContainerResourceDetector(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ExperimentalHostResourceDetector) UnmarshalJSON(b []byte) error { + type plain ExperimentalHostResourceDetector + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = ExperimentalHostResourceDetector{} + } else { + *j = ExperimentalHostResourceDetector(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ExperimentalProcessResourceDetector) UnmarshalJSON(b []byte) error { + type plain ExperimentalProcessResourceDetector + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = ExperimentalProcessResourceDetector{} + } else { + *j = ExperimentalProcessResourceDetector(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *ExperimentalServiceResourceDetector) UnmarshalJSON(b []byte) error { + type plain ExperimentalServiceResourceDetector + var p plain + if err := json.Unmarshal(b, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // If key is present (even if empty object), ensure non-nil value. + if p == nil { + *j = ExperimentalServiceResourceDetector{} + } else { + *j = ExperimentalServiceResourceDetector(p) + } + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ExperimentalResourceDetector) UnmarshalJSON(b []byte) error { + // Use a shadow struct with a RawMessage field to detect key presence. + type Plain ExperimentalResourceDetector + type shadow struct { + Plain + Container json.RawMessage `json:"container"` + Host json.RawMessage `json:"host"` + Process json.RawMessage `json:"process"` + Service json.RawMessage `json:"service"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.Container != nil { + var c ExperimentalContainerResourceDetector + if err := json.Unmarshal(sh.Container, &c); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Container = c + } + + if sh.Host != nil { + var c ExperimentalHostResourceDetector + if err := json.Unmarshal(sh.Host, &c); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Host = c + } + + if sh.Process != nil { + var c ExperimentalProcessResourceDetector + if err := json.Unmarshal(sh.Process, &c); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Process = c + } + + if sh.Service != nil { + var c ExperimentalServiceResourceDetector + if err := json.Unmarshal(sh.Service, &c); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Service = c + } + *j = ExperimentalResourceDetector(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PushMetricExporter) UnmarshalJSON(b []byte) error { + // Use a shadow struct with a RawMessage field to detect key presence. + type Plain PushMetricExporter + type shadow struct { + Plain + Console json.RawMessage `json:"console"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.Console != nil { + var c ConsoleMetricExporter + if err := json.Unmarshal(sh.Console, &c); err != nil { + return err + } + sh.Plain.Console = &c + } + *j = PushMetricExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpanExporter) UnmarshalJSON(b []byte) error { + // Use a shadow struct with a RawMessage field to detect key presence. + type Plain SpanExporter + type shadow struct { + Plain + Console json.RawMessage `json:"console"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.Console != nil { + var c ConsoleExporter + if err := json.Unmarshal(sh.Console, &c); err != nil { + return err + } + sh.Plain.Console = c + } + *j = SpanExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *LogRecordExporter) UnmarshalJSON(b []byte) error { + // Use a shadow struct with a RawMessage field to detect key presence. + type Plain LogRecordExporter + type shadow struct { + Plain + Console json.RawMessage `json:"console"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.Console != nil { + var c ConsoleExporter + if err := json.Unmarshal(sh.Console, &c); err != nil { + return err + } + sh.Plain.Console = c + } + *j = LogRecordExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TextMapPropagator) UnmarshalJSON(b []byte) error { + type Plain TextMapPropagator + type shadow struct { + Plain + B3 json.RawMessage `json:"b3"` + B3Multi json.RawMessage `json:"b3multi"` + Baggage json.RawMessage `json:"baggage"` + Jaeger json.RawMessage `json:"jaeger"` + Ottrace json.RawMessage `json:"ottrace"` + Tracecontext json.RawMessage `json:"tracecontext"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.B3 != nil { + var p B3Propagator + if err := json.Unmarshal(sh.B3, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.B3 = p + } + + if sh.B3Multi != nil { + var p B3MultiPropagator + if err := json.Unmarshal(sh.B3Multi, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.B3Multi = p + } + + if sh.Baggage != nil { + var p BaggagePropagator + if err := json.Unmarshal(sh.Baggage, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Baggage = p + } + + if sh.Jaeger != nil { + var p JaegerPropagator + if err := json.Unmarshal(sh.Jaeger, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Jaeger = p + } + + if sh.Ottrace != nil { + var p OpenTracingPropagator + if err := json.Unmarshal(sh.Ottrace, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Ottrace = p + } + + if sh.Tracecontext != nil { + var p TraceContextPropagator + if err := json.Unmarshal(sh.Tracecontext, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Tracecontext = p + } + + *j = TextMapPropagator(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { + type Plain BatchLogRecordProcessor + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + if err := validateBatchLogRecordProcessor((*BatchLogRecordProcessor)(&sh.Plain)); err != nil { + return err + } + *j = BatchLogRecordProcessor(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + type Plain BatchSpanProcessor + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + if err := validateBatchSpanProcessor((*BatchSpanProcessor)(&sh.Plain)); err != nil { + return err + } + *j = BatchSpanProcessor(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { + type Plain OpenTelemetryConfiguration + type shadow struct { + Plain + FileFormat json.RawMessage `json:"file_format"` + LoggerProvider json.RawMessage `json:"logger_provider"` + MeterProvider json.RawMessage `json:"meter_provider"` + TracerProvider json.RawMessage `json:"tracer_provider"` + Propagator json.RawMessage `json:"propagator"` + Resource json.RawMessage `json:"resource"` + InstrumentationDevelopment json.RawMessage `json:"instrumentation/development"` + AttributeLimits json.RawMessage `json:"attribute_limits"` + Disabled json.RawMessage `json:"disabled"` + LogLevel json.RawMessage `json:"log_level"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if len(sh.FileFormat) == 0 { + return newErrRequired(j, "file_format") + } + + if err := json.Unmarshal(sh.FileFormat, &sh.Plain.FileFormat); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.LoggerProvider != nil { + var l LoggerProvider + if err := json.Unmarshal(sh.LoggerProvider, &l); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.LoggerProvider = &l + } + + if sh.MeterProvider != nil { + var m MeterProvider + if err := json.Unmarshal(sh.MeterProvider, &m); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.MeterProvider = &m + } + + if sh.TracerProvider != nil { + var t TracerProvider + if err := json.Unmarshal(sh.TracerProvider, &t); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.TracerProvider = &t + } + + if sh.Propagator != nil { + var p Propagator + if err := json.Unmarshal(sh.Propagator, &p); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Propagator = &p + } + + if sh.Resource != nil { + var r Resource + if err := json.Unmarshal(sh.Resource, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.Resource = &r 
+ } + + if sh.InstrumentationDevelopment != nil { + var r ExperimentalInstrumentation + if err := json.Unmarshal(sh.InstrumentationDevelopment, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.InstrumentationDevelopment = &r + } + + if sh.AttributeLimits != nil { + var r AttributeLimits + if err := json.Unmarshal(sh.AttributeLimits, &r); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + sh.Plain.AttributeLimits = &r + } + + if sh.Disabled != nil { + if err := json.Unmarshal(sh.Disabled, &sh.Plain.Disabled); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + } else { + // Configure if the SDK is disabled or not. + // If omitted or null, false is used. + sh.Plain.Disabled = ptr(false) + } + + if sh.LogLevel != nil { + if err := json.Unmarshal(sh.LogLevel, &sh.Plain.LogLevel); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + } else { + // Configure the log level of the internal logger used by the SDK. + // If omitted, info is used. + sh.Plain.LogLevel = ptr(SeverityNumberInfo) + } + + *j = OpenTelemetryConfiguration(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { + type Plain PeriodicMetricReader + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + err := validatePeriodicMetricReader((*PeriodicMetricReader)(&sh.Plain)) + if err != nil { + return err + } + *j = PeriodicMetricReader(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *CardinalityLimits) UnmarshalJSON(value []byte) error { + type Plain CardinalityLimits + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateCardinalityLimits((*CardinalityLimits)(&plain)); err != nil { + return err + } + *j = CardinalityLimits(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpanLimits) UnmarshalJSON(value []byte) error { + type Plain SpanLimits + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateSpanLimits((*SpanLimits)(&plain)); err != nil { + return err + } + *j = SpanLimits(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPHttpMetricExporter) UnmarshalJSON(b []byte) error { + type Plain OTLPHttpMetricExporter + type shadow struct { + Plain + Endpoint json.RawMessage `json:"endpoint"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Endpoint == nil { + return newErrRequired(j, "endpoint") + } + if err := json.Unmarshal(sh.Endpoint, &sh.Plain.Endpoint); err != nil { + return err + } + if sh.Timeout != nil && 0 > *sh.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPHttpMetricExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OTLPGrpcMetricExporter) UnmarshalJSON(b []byte) error { + type Plain OTLPGrpcMetricExporter + type shadow struct { + Plain + Endpoint json.RawMessage `json:"endpoint"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Endpoint == nil { + return newErrRequired(j, "endpoint") + } + if err := json.Unmarshal(sh.Endpoint, &sh.Plain.Endpoint); err != nil { + return err + } + if sh.Timeout != nil && 0 > *sh.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPGrpcMetricExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPHttpExporter) UnmarshalJSON(b []byte) error { + type Plain OTLPHttpExporter + type shadow struct { + Plain + Endpoint json.RawMessage `json:"endpoint"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Endpoint == nil { + return newErrRequired(j, "endpoint") + } + if err := json.Unmarshal(sh.Endpoint, &sh.Plain.Endpoint); err != nil { + return err + } + if sh.Timeout != nil && 0 > *sh.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPHttpExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPGrpcExporter) UnmarshalJSON(b []byte) error { + type Plain OTLPGrpcExporter + type shadow struct { + Plain + Endpoint json.RawMessage `json:"endpoint"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Endpoint == nil { + return newErrRequired(j, "endpoint") + } + if err := json.Unmarshal(sh.Endpoint, &sh.Plain.Endpoint); err != nil { + return err + } + if sh.Timeout != nil && 0 > *sh.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPGrpcExporter(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *AttributeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + var ok bool + for _, expected := range enumValuesAttributeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return newErrInvalid(fmt.Sprintf("unexpected value type %#v, expected one of %#v)", v, enumValuesAttributeType)) + } + *j = AttributeType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *AttributeNameValue) UnmarshalJSON(b []byte) error { + type Plain AttributeNameValue + type shadow struct { + Plain + Name json.RawMessage `json:"name"` + Value json.RawMessage `json:"value"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Name == nil { + return newErrRequired(j, "name") + } + if err := json.Unmarshal(sh.Name, &sh.Plain.Name); err != nil { + return err + } + if sh.Value == nil { + return newErrRequired(j, "value") + } + if err := json.Unmarshal(sh.Value, &sh.Plain.Value); err != nil { + return err + } + + // json unmarshaller defaults to unmarshalling to float for int values + if sh.Type != nil && *sh.Type == AttributeTypeInt { + val, ok := sh.Plain.Value.(float64) + if ok { + sh.Plain.Value = int(val) + } + } + + if sh.Type != nil && *sh.Type == AttributeTypeIntArray { + m, ok := sh.Plain.Value.([]any) + if ok { + var vals []any + for _, v := range m { + val, ok := v.(float64) + if ok { + vals = append(vals, int(val)) + } else { + vals = append(vals, v) + } + } + sh.Plain.Value = vals + } + } + + *j = AttributeNameValue(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { + type Plain SimpleLogRecordProcessor + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + *j = SimpleLogRecordProcessor(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + type Plain SimpleSpanProcessor + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + *j = SimpleSpanProcessor(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *NameStringValuePair) UnmarshalJSON(b []byte) error { + type Plain NameStringValuePair + type shadow struct { + Plain + Name json.RawMessage `json:"name"` + Value json.RawMessage `json:"value"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Name == nil { + return newErrRequired(j, "name") + } + if err := json.Unmarshal(sh.Name, &sh.Plain.Name); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Value == nil { + return newErrRequired(j, "value") + } + if err := json.Unmarshal(sh.Value, &sh.Plain.Value); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + *j = NameStringValuePair(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *InstrumentType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := supportedInstrumentType(InstrumentType(v)); err != nil { + return err + } + *j = InstrumentType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ExperimentalPeerServiceMapping) UnmarshalJSON(b []byte) error { + type Plain ExperimentalPeerServiceMapping + type shadow struct { + Plain + Peer json.RawMessage `json:"peer"` + Service json.RawMessage `json:"service"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Peer == nil { + return newErrRequired(j, "peer") + } + if err := json.Unmarshal(sh.Peer, &sh.Plain.Peer); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Service == nil { + return newErrRequired(j, "service") + } + if err := json.Unmarshal(sh.Service, &sh.Plain.Service); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + *j = ExperimentalPeerServiceMapping(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *ExporterDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := supportedHistogramAggregation(ExporterDefaultHistogramAggregation(v)); err != nil { + return err + } + *j = ExporterDefaultHistogramAggregation(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PullMetricReader) UnmarshalJSON(b []byte) error { + type Plain PullMetricReader + type shadow struct { + Plain + Exporter json.RawMessage `json:"exporter"` + } + var sh shadow + if err := json.Unmarshal(b, &sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if sh.Exporter == nil { + return newErrRequired(j, "exporter") + } + // Hydrate the exporter into the underlying field. + if err := json.Unmarshal(sh.Exporter, &sh.Plain.Exporter); err != nil { + return err + } + *j = PullMetricReader(sh.Plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Sampler) UnmarshalJSON(b []byte) error { + var raw map[string]any + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain Sampler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + unmarshalSamplerTypes(raw, (*Sampler)(&plain)) + *j = Sampler(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *MetricProducer) UnmarshalJSON(b []byte) error { + var raw map[string]any + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain MetricProducer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + unmarshalMetricProducer(raw, (*MetricProducer)(&plain)) + *j = MetricProducer(plain) + return nil +} diff --git a/otelconf/x/config_test.go b/otelconf/x/config_test.go new file mode 100644 index 00000000000..41bf4c1ba21 --- /dev/null +++ b/otelconf/x/config_test.go @@ -0,0 +1,2379 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + lognoop "go.opentelemetry.io/otel/log/noop" + metricnoop "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/propagation" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracenoop "go.opentelemetry.io/otel/trace/noop" + "go.yaml.in/yaml/v3" +) + +func TestUnmarshalPushMetricExporterInvalidData(t *testing.T) { + cl := PushMetricExporter{} + err := cl.UnmarshalJSON([]byte(`{:2000}`)) + assert.ErrorIs(t, err, newErrUnmarshal(&PushMetricExporter{})) + + cl = PushMetricExporter{} + err = yaml.Unmarshal([]byte("console: !!str str"), &cl) + assert.ErrorIs(t, err, newErrUnmarshal(&PushMetricExporter{})) +} + +func TestUnmarshalLogRecordExporterInvalidData(t *testing.T) { + cl := LogRecordExporter{} + err := cl.UnmarshalJSON([]byte(`{:2000}`)) + assert.ErrorIs(t, err, newErrUnmarshal(&LogRecordExporter{})) + + cl = LogRecordExporter{} + err = cl.UnmarshalJSON([]byte(`{"console":2000}`)) + assert.ErrorIs(t, err, newErrUnmarshal(&ConsoleExporter{})) + + cl = LogRecordExporter{} + err = yaml.Unmarshal([]byte("console: !!str str"), &cl) + assert.ErrorIs(t, err, 
newErrUnmarshal(&LogRecordExporter{})) +} + +func TestUnmarshalSpanExporterInvalidData(t *testing.T) { + cl := SpanExporter{} + err := cl.UnmarshalJSON([]byte(`{:2000}`)) + assert.ErrorIs(t, err, newErrUnmarshal(&SpanExporter{})) + + cl = SpanExporter{} + err = cl.UnmarshalJSON([]byte(`{"console":2000}`)) + assert.ErrorIs(t, err, newErrUnmarshal(&ConsoleExporter{})) + + cl = SpanExporter{} + err = yaml.Unmarshal([]byte("console: !!str str"), &cl) + assert.ErrorIs(t, err, newErrUnmarshal(&SpanExporter{})) +} + +func TestUnmarshalTextMapPropagator(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantTextMapPropagator TextMapPropagator + }{ + { + name: "valid with b3 propagator", + jsonConfig: []byte(`{"b3":{}}`), + yamlConfig: []byte("b3: {}\n"), + wantTextMapPropagator: TextMapPropagator{B3: B3Propagator{}}, + }, + { + name: "valid with all propagators", + jsonConfig: []byte(`{"b3":{},"b3multi":{},"baggage":{},"jaeger":{},"ottrace":{},"tracecontext":{}}`), + yamlConfig: []byte("b3: {}\nb3multi: {}\nbaggage: {}\njaeger: {}\nottrace: {}\ntracecontext: {}\n"), + wantTextMapPropagator: TextMapPropagator{ + B3: B3Propagator{}, + B3Multi: B3MultiPropagator{}, + Baggage: BaggagePropagator{}, + Jaeger: JaegerPropagator{}, + Ottrace: OpenTracingPropagator{}, + Tracecontext: TraceContextPropagator{}, + }, + }, + { + name: "valid with all propagators nil", + jsonConfig: []byte(`{"b3":null,"b3multi":null,"baggage":null,"jaeger":null,"ottrace":null,"tracecontext":null}`), + yamlConfig: []byte("b3:\nb3multi:\nbaggage:\njaeger:\nottrace:\ntracecontext:\n"), + wantTextMapPropagator: TextMapPropagator{ + B3: B3Propagator{}, + B3Multi: B3MultiPropagator{}, + Baggage: BaggagePropagator{}, + Jaeger: JaegerPropagator{}, + Ottrace: OpenTracingPropagator{}, + Tracecontext: TraceContextPropagator{}, + }, + }, + { + name: "invalid b3 data", + jsonConfig: []byte(`{"b3":2000}`), + yamlConfig: []byte("b3: !!str str"), + 
wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + { + name: "invalid b3multi data", + jsonConfig: []byte(`{"b3multi":2000}`), + yamlConfig: []byte("b3multi: !!str str"), + wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + { + name: "invalid baggage data", + jsonConfig: []byte(`{"baggage":2000}`), + yamlConfig: []byte("baggage: !!str str"), + wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + { + name: "invalid jaeger data", + jsonConfig: []byte(`{"jaeger":2000}`), + yamlConfig: []byte("jaeger: !!str str"), + wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + { + name: "invalid ottrace data", + jsonConfig: []byte(`{"ottrace":2000}`), + yamlConfig: []byte("ottrace: !!str str"), + wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + { + name: "invalid tracecontext data", + jsonConfig: []byte(`{"tracecontext":2000}`), + yamlConfig: []byte("tracecontext: !!str str"), + wantErrT: newErrUnmarshal(&TextMapPropagator{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := TextMapPropagator{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantTextMapPropagator, cl) + + cl = TextMapPropagator{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantTextMapPropagator, cl) + }) + } +} + +func TestUnmarshalSimpleLogRecordProcessor(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter LogRecordExporter + }{ + { + name: "valid with console exporter", + jsonConfig: []byte(`{"exporter":{"console":{}}}`), + yamlConfig: []byte("exporter:\n console: {}"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with null console exporter", + jsonConfig: []byte(`{"exporter":{"console":null}}`), + yamlConfig: []byte("exporter:\n console:\n"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "missing required exporter 
field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&SimpleLogRecordProcessor{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n console: []"), + wantErrT: newErrUnmarshal(&SimpleLogRecordProcessor{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := SimpleLogRecordProcessor{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + + cl = SimpleLogRecordProcessor{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + }) + } +} + +func TestUnmarshalSimpleSpanProcessor(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter SpanExporter + }{ + { + name: "valid with null console exporter", + jsonConfig: []byte(`{"exporter":{"console":null}}`), + yamlConfig: []byte("exporter:\n console:\n"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with console exporter", + jsonConfig: []byte(`{"exporter":{"console":{}}}`), + yamlConfig: []byte("exporter:\n console: {}"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "missing required exporter field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&SimpleSpanProcessor{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n console: []"), + wantErrT: newErrUnmarshal(&SimpleSpanProcessor{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := SimpleSpanProcessor{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + + cl = SimpleSpanProcessor{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, 
cl.Exporter) + }) + } +} + +func TestUnmarshalBatchLogRecordProcessor(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter LogRecordExporter + }{ + { + name: "valid with console exporter", + jsonConfig: []byte(`{"exporter":{"console":{}}}`), + yamlConfig: []byte("exporter:\n console: {}"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with null console exporter", + jsonConfig: []byte(`{"exporter":{"console":null}}`), + yamlConfig: []byte("exporter:\n console:\n"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with all fields positive", + jsonConfig: []byte(`{"exporter":{"console":{}},"export_timeout":5000,"max_export_batch_size":512,"max_queue_size":2048,"schedule_delay":1000}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: 5000\nmax_export_batch_size: 512\nmax_queue_size: 2048\nschedule_delay: 1000"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with zero export_timeout", + jsonConfig: []byte(`{"exporter":{"console":{}},"export_timeout":0}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: 0"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with zero schedule_delay", + jsonConfig: []byte(`{"exporter":{"console":{}},"schedule_delay":0}`), + yamlConfig: []byte("exporter:\n console: {}\nschedule_delay: 0"), + wantExporter: LogRecordExporter{Console: ConsoleExporter{}}, + }, + { + name: "missing required exporter field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&BatchLogRecordProcessor{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: !!str str"), + wantErrT: newErrUnmarshal(&BatchLogRecordProcessor{}), + }, + { + name: "invalid export_timeout negative", + 
jsonConfig: []byte(`{"exporter":{"console":{}},"export_timeout":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: -1"), + wantErrT: newErrGreaterOrEqualZero("export_timeout"), + }, + { + name: "invalid max_export_batch_size zero", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_export_batch_size":0}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_export_batch_size: 0"), + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { + name: "invalid max_export_batch_size negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_export_batch_size":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_export_batch_size: -1"), + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { + name: "invalid max_queue_size zero", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_queue_size":0}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_queue_size: 0"), + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "invalid max_queue_size negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_queue_size":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_queue_size: -1"), + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "invalid schedule_delay negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"schedule_delay":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nschedule_delay: -1"), + wantErrT: newErrGreaterOrEqualZero("schedule_delay"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := BatchLogRecordProcessor{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + + cl = BatchLogRecordProcessor{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + }) + } +} + +func TestNewSDK(t *testing.T) { + tests := []struct { + name string + cfg []ConfigurationOption + wantTracerProvider any + 
wantMeterProvider any + wantLoggerProvider any + wantPropagator any + wantErr error + wantShutdownErr error + }{ + { + name: "no-configuration", + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + wantPropagator: propagation.NewCompositeTextMapPropagator(), + }, + { + name: "with-configuration", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{}, + MeterProvider: &MeterProvider{}, + LoggerProvider: &LoggerProvider{}, + Propagator: &Propagator{}, + }), + }, + wantTracerProvider: &sdktrace.TracerProvider{}, + wantMeterProvider: &sdkmetric.MeterProvider{}, + wantLoggerProvider: &sdklog.LoggerProvider{}, + wantPropagator: propagation.NewCompositeTextMapPropagator(), + }, + { + name: "with-sdk-disabled", + cfg: []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + Disabled: ptr(true), + TracerProvider: &TracerProvider{}, + MeterProvider: &MeterProvider{}, + LoggerProvider: &LoggerProvider{}, + }), + }, + wantTracerProvider: tracenoop.NewTracerProvider(), + wantMeterProvider: metricnoop.NewMeterProvider(), + wantLoggerProvider: lognoop.NewLoggerProvider(), + wantPropagator: propagation.NewCompositeTextMapPropagator(), + }, + } + for _, tt := range tests { + sdk, err := NewSDK(tt.cfg...) 
+ require.Equal(t, tt.wantErr, err) + assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider()) + assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider()) + assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider()) + assert.IsType(t, tt.wantPropagator, sdk.Propagator()) + require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(t.Context())) + } +} + +func TestNewSDKWithEnvVar(t *testing.T) { + cfg := []ConfigurationOption{ + WithContext(t.Context()), + WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ + TracerProvider: nil, + }), + } + // test a non existent file + t.Setenv(envVarConfigFile, filepath.Join("..", "testdata", "file_missing.yaml")) + _, err := NewSDK(cfg...) + require.Error(t, err) + // test a file that causes a parse error + t.Setenv(envVarConfigFile, filepath.Join("..", "testdata", "v1.0.0_invalid_nil_name.yaml")) + _, err = NewSDK(cfg...) + require.Error(t, err) + require.ErrorIs(t, err, newErrRequired(&NameStringValuePair{}, "name")) + // test a valid file, error is returned from the SDK instantiation + t.Setenv(envVarConfigFile, filepath.Join("..", "testdata", "v1.0.0.yaml")) + _, err = NewSDK(cfg...) 
+ require.ErrorIs(t, err, newErrInvalid("otlp_file/development")) +} + +var v10OpenTelemetryConfig = OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0-rc.2", + AttributeLimits: &AttributeLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + InstrumentationDevelopment: &ExperimentalInstrumentation{ + Cpp: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Dotnet: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Erlang: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + General: &ExperimentalGeneralInstrumentation{ + Http: &ExperimentalHttpInstrumentation{ + Client: &ExperimentalHttpClientInstrumentation{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + Server: &ExperimentalHttpServerInstrumentation{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + }, + Peer: &ExperimentalPeerInstrumentation{ + ServiceMapping: []ExperimentalPeerServiceMapping{ + {Peer: "1.2.3.4", Service: "FooService"}, + {Peer: "2.3.4.5", Service: "BarService"}, + }, + }, + }, + Go: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Java: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Js: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Php: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Python: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Ruby: ExperimentalLanguageSpecificInstrumentation{ 
+ "example": map[string]any{ + "property": "value", + }, + }, + Rust: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + Swift: ExperimentalLanguageSpecificInstrumentation{ + "example": map[string]any{ + "property": "value", + }, + }, + }, + LogLevel: ptr(SeverityNumberInfo), + LoggerProvider: &LoggerProvider{ + LoggerConfiguratorDevelopment: &ExperimentalLoggerConfigurator{ + DefaultConfig: &ExperimentalLoggerConfig{ + Disabled: ptr(true), + }, + Loggers: []ExperimentalLoggerMatcherAndConfig{ + { + Config: ExperimentalLoggerConfig{ + Disabled: ptr(false), + }, + Name: "io.opentelemetry.contrib.*", + }, + }, + }, + Limits: &LogRecordLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + Processors: []LogRecordProcessor{ + { + Batch: &BatchLogRecordProcessor{ + ExportTimeout: ptr(30000), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Tls: &HttpTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + }, + Compression: ptr("gzip"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Endpoint: ptr("http://localhost:4318/v1/logs"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Tls: &GrpcTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + Insecure: ptr(false), + }, + Compression: ptr("gzip"), + Endpoint: ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: 
LogRecordExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("file:///var/log/logs.jsonl"), + }, + }, + }, + }, + { + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("stdout"), + }, + }, + }, + }, + { + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + }, + MeterProvider: &MeterProvider{ + ExemplarFilter: ptr(ExemplarFilter("trace_based")), + MeterConfiguratorDevelopment: &ExperimentalMeterConfigurator{ + DefaultConfig: &ExperimentalMeterConfig{ + Disabled: ptr(true), + }, + Meters: []ExperimentalMeterMatcherAndConfig{ + { + Config: ExperimentalMeterConfig{ + Disabled: ptr(false), + }, + Name: "io.opentelemetry.contrib.*", + }, + }, + }, + Readers: []MetricReader{ + { + Pull: &PullMetricReader{ + Producers: []MetricProducer{ + { + Opencensus: OpenCensusMetricProducer{}, + }, + }, + CardinalityLimits: &CardinalityLimits{ + Default: ptr(2000), + Counter: ptr(2000), + Gauge: ptr(2000), + Histogram: ptr(2000), + ObservableCounter: ptr(2000), + ObservableGauge: ptr(2000), + ObservableUpDownCounter: ptr(2000), + UpDownCounter: ptr(2000), + }, + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(9464), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), + WithResourceConstantLabels: &IncludeExclude{ + Excluded: []string{"service.attr1"}, + Included: []string{"service*"}, + }, + WithoutScopeInfo: ptr(false), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Producers: []MetricProducer{ + { + AdditionalProperties: map[string]any{ + "prometheus": nil, + }, + }, + }, + CardinalityLimits: &CardinalityLimits{ + Default: ptr(2000), + Counter: ptr(2000), + Gauge: ptr(2000), + Histogram: ptr(2000), + ObservableCounter: ptr(2000), + ObservableGauge: 
ptr(2000), + ObservableUpDownCounter: ptr(2000), + UpDownCounter: ptr(2000), + }, + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Tls: &HttpTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + }, + Compression: ptr("gzip"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + Endpoint: ptr("http://localhost:4318/v1/metrics"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + Timeout: ptr(10000), + }, + }, + Interval: ptr(60000), + Timeout: ptr(30000), + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Tls: &GrpcTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + Insecure: ptr(false), + }, + Compression: ptr("gzip"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + Endpoint: ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + Timeout: ptr(10000), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ + OutputStream: ptr("file:///var/log/metrics.jsonl"), + DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{ + OutputStream: ptr("stdout"), + 
DefaultHistogramAggregation: ptr(ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram), + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: &ConsoleMetricExporter{}, + }, + }, + }, + }, + Views: []View{ + { + Selector: ViewSelector{ + InstrumentName: ptr("my-instrument"), + InstrumentType: ptr(InstrumentTypeHistogram), + MeterName: ptr("my-meter"), + MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + MeterVersion: ptr("1.0.0"), + Unit: ptr("ms"), + }, + Stream: ViewStream{ + Aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + RecordMinMax: ptr(true), + }, + }, + AggregationCardinalityLimit: ptr(2000), + AttributeKeys: &IncludeExclude{ + Included: []string{"key1", "key2"}, + Excluded: []string{"key3"}, + }, + Description: ptr("new_description"), + Name: ptr("new_instrument_name"), + }, + }, + }, + }, + Propagator: &Propagator{ + Composite: []TextMapPropagator{ + { + Tracecontext: TraceContextPropagator{}, + }, + { + Baggage: BaggagePropagator{}, + }, + { + B3: B3Propagator{}, + }, + { + B3Multi: B3MultiPropagator{}, + }, + { + Jaeger: JaegerPropagator{}, + }, + { + Ottrace: OpenTracingPropagator{}, + }, + }, + CompositeList: ptr("tracecontext,baggage,b3,b3multi,jaeger,ottrace,xray"), + }, + Resource: &Resource{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "unknown_service"}, + {Name: "string_key", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "bool_key", Type: ptr(AttributeTypeBool), Value: true}, + {Name: "int_key", Type: ptr(AttributeTypeInt), Value: 1}, + {Name: "double_key", Type: ptr(AttributeTypeDouble), Value: 1.1}, + {Name: "string_array_key", Type: ptr(AttributeTypeStringArray), Value: []any{"value1", "value2"}}, + {Name: "bool_array_key", 
Type: ptr(AttributeTypeBoolArray), Value: []any{true, false}}, + {Name: "int_array_key", Type: ptr(AttributeTypeIntArray), Value: []any{1, 2}}, + {Name: "double_array_key", Type: ptr(AttributeTypeDoubleArray), Value: []any{1.1, 2.2}}, + }, + AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), + DetectionDevelopment: &ExperimentalResourceDetection{ + Attributes: &IncludeExclude{ + Excluded: []string{"process.command_args"}, + Included: []string{"process.*"}, + }, + Detectors: []ExperimentalResourceDetector{ + {Container: ExperimentalContainerResourceDetector{}}, + {Host: ExperimentalHostResourceDetector{}}, + {Process: ExperimentalProcessResourceDetector{}}, + {Service: ExperimentalServiceResourceDetector{}}, + }, + }, + }, + TracerProvider: &TracerProvider{ + TracerConfiguratorDevelopment: &ExperimentalTracerConfigurator{ + DefaultConfig: &ExperimentalTracerConfig{ + Disabled: ptr(true), + }, + Tracers: []ExperimentalTracerMatcherAndConfig{ + { + Config: ExperimentalTracerConfig{ + Disabled: ptr(false), + }, + Name: "io.opentelemetry.contrib.*", + }, + }, + }, + + Limits: &SpanLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + EventCountLimit: ptr(128), + EventAttributeCountLimit: ptr(128), + LinkCountLimit: ptr(128), + LinkAttributeCountLimit: ptr(128), + }, + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{ + ExportTimeout: ptr(30000), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Tls: &HttpTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + }, + Compression: ptr("gzip"), + Encoding: ptr(OTLPHttpEncodingProtobuf), + Endpoint: ptr("http://localhost:4318/v1/traces"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + MaxExportBatchSize: ptr(512), + MaxQueueSize: ptr(2048), + ScheduleDelay: ptr(5000), + }, + }, + { + 
Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Tls: &GrpcTls{ + CaFile: ptr("testdata/ca.crt"), + CertFile: ptr("testdata/client.crt"), + KeyFile: ptr("testdata/client.key"), + Insecure: ptr(false), + }, + Compression: ptr("gzip"), + Endpoint: ptr("http://localhost:4317"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, + }, + HeadersList: ptr("api-key=1234"), + Timeout: ptr(10000), + }, + }, + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("file:///var/log/traces.jsonl"), + }, + }, + }, + }, + { + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{ + OutputStream: ptr("stdout"), + }, + }, + }, + }, + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + Sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + LocalParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + LocalParentSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + RemoteParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + RemoteParentSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + Root: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.0001), + }, + }, + }, + }, + }, +} + +var v100OpenTelemetryConfigEnvParsing = OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0", + LogLevel: ptr(SeverityNumberInfo), + AttributeLimits: &AttributeLimits{ + AttributeCountLimit: ptr(128), + AttributeValueLengthLimit: ptr(4096), + }, + Resource: &Resource{ + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "unknown_service"}, + {Name: "string_key", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "bool_key", Type: ptr(AttributeTypeBool), Value: true}, + {Name: "int_key", Type: ptr(AttributeTypeInt), Value: 1}, + {Name: "double_key", Type: 
ptr(AttributeTypeDouble), Value: 1.1}, + {Name: "string_array_key", Type: ptr(AttributeTypeStringArray), Value: []any{"value1", "value2"}}, + {Name: "bool_array_key", Type: ptr(AttributeTypeBoolArray), Value: []any{true, false}}, + {Name: "int_array_key", Type: ptr(AttributeTypeIntArray), Value: []any{1, 2}}, + {Name: "double_array_key", Type: ptr(AttributeTypeDoubleArray), Value: []any{1.1, 2.2}}, + {Name: "string_value", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "bool_value", Type: ptr(AttributeTypeBool), Value: true}, + {Name: "int_value", Type: ptr(AttributeTypeInt), Value: 1}, + {Name: "float_value", Type: ptr(AttributeTypeDouble), Value: 1.1}, + {Name: "hex_value", Type: ptr(AttributeTypeInt), Value: int(48879)}, + {Name: "quoted_string_value", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "quoted_bool_value", Type: ptr(AttributeTypeString), Value: "true"}, + {Name: "quoted_int_value", Type: ptr(AttributeTypeString), Value: "1"}, + {Name: "quoted_float_value", Type: ptr(AttributeTypeString), Value: "1.1"}, + {Name: "quoted_hex_value", Type: ptr(AttributeTypeString), Value: "0xbeef"}, + {Name: "alternative_env_syntax", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "invalid_map_value", Type: ptr(AttributeTypeString), Value: "value\nkey:value"}, + {Name: "multiple_references_inject", Type: ptr(AttributeTypeString), Value: "foo value 1.1"}, + {Name: "undefined_key", Type: ptr(AttributeTypeString), Value: nil}, + {Name: "undefined_key_fallback", Type: ptr(AttributeTypeString), Value: "fallback"}, + {Name: "env_var_in_key", Type: ptr(AttributeTypeString), Value: "value"}, + {Name: "replace_me", Type: ptr(AttributeTypeString), Value: "${DO_NOT_REPLACE_ME}"}, + {Name: "undefined_defaults_to_var", Type: ptr(AttributeTypeString), Value: "${STRING_VALUE}"}, + {Name: "escaped_does_not_substitute", Type: ptr(AttributeTypeString), Value: "${STRING_VALUE}"}, + {Name: "escaped_does_not_substitute_fallback", Type: 
ptr(AttributeTypeString), Value: "${STRING_VALUE:-fallback}"}, + {Name: "escaped_and_substituted_fallback", Type: ptr(AttributeTypeString), Value: "${STRING_VALUE:-value}"}, + {Name: "escaped_and_substituted", Type: ptr(AttributeTypeString), Value: "$value"}, + {Name: "multiple_escaped_and_not_substituted", Type: ptr(AttributeTypeString), Value: "$${STRING_VALUE}"}, + {Name: "undefined_key_with_escape_sequence_in_fallback", Type: ptr(AttributeTypeString), Value: "${UNDEFINED_KEY}"}, + {Name: "value_with_escape", Type: ptr(AttributeTypeString), Value: "value$$"}, + {Name: "escape_sequence", Type: ptr(AttributeTypeString), Value: "a $ b"}, + {Name: "no_escape_sequence", Type: ptr(AttributeTypeString), Value: "a $ b"}, + }, + AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), + // Detectors: &Detectors{ + // Attributes: &DetectorsAttributes{ + // Excluded: []string{"process.command_args"}, + // Included: []string{"process.*"}, + // }, + // }, + SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), + }, +} + +func TestParseFiles(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType *OpenTelemetryConfiguration + }{ + { + name: "invalid nil name", + input: "v1.0.0_invalid_nil_name", + wantErr: newErrRequired(&NameStringValuePair{}, "name"), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "invalid nil value", + input: "v1.0.0_invalid_nil_value", + wantErr: newErrRequired(&NameStringValuePair{}, "value"), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v0.2 config", + input: "v0.2", + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v0.3 config", + input: "v0.3", + wantErr: newErrUnmarshal(&TextMapPropagator{}), + wantType: &OpenTelemetryConfiguration{}, + }, + { + name: "valid v1.0.0 config", + input: "v1.0.0", + wantType: &v10OpenTelemetryConfig, + }, + } + + for _, tt := range tests { + 
t.Run("yaml:"+tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", fmt.Sprintf("%s.yaml", tt.input))) + require.NoError(t, err) + + got, err := ParseYAML(b) + require.ErrorIs(t, err, tt.wantErr) + if tt.wantErr == nil { + assert.Equal(t, tt.wantType, got) + } + }) + t.Run("json: "+tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", fmt.Sprintf("%s.json", tt.input))) + require.NoError(t, err) + + var got OpenTelemetryConfiguration + err = json.Unmarshal(b, &got) + require.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, &got) + }) + } +} + +func TestUnmarshalOpenTelemetryConfiguration(t *testing.T) { + tests := []struct { + name string + jsonConfig []byte + yamlConfig []byte + wantErr error + wantType OpenTelemetryConfiguration + }{ + { + name: "valid defaults config", + jsonConfig: []byte(`{"file_format": "1.0"}`), + yamlConfig: []byte("file_format: 1.0"), + wantType: OpenTelemetryConfiguration{ + Disabled: ptr(false), + FileFormat: "1.0", + LogLevel: ptr(SeverityNumberInfo), + }, + }, + { + name: "invalid config missing required file_format", + jsonConfig: []byte(`{"disabled": false}`), + yamlConfig: []byte("disabled: false"), + wantErr: newErrRequired(&OpenTelemetryConfiguration{}, "file_format"), + }, + { + name: "file_format invalid", + jsonConfig: []byte(`{"file_format":[], "disabled": false}`), + yamlConfig: []byte("file_format: []\ndisabled: false"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "invalid config", + jsonConfig: []byte(`{"file_format": "yaml", "disabled": "notabool"}`), + yamlConfig: []byte("file_format: []\ndisabled: notabool"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("disabled: []\nconsole: {}\nfile_format: str"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "resource invalid", + jsonConfig: 
[]byte(`{"resource":[], "file_format": "1.0"}`), + yamlConfig: []byte("resource: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "attribute_limits invalid", + jsonConfig: []byte(`{"attribute_limits":[], "file_format": "1.0"}`), + yamlConfig: []byte("attribute_limits: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "instrumentation invalid", + jsonConfig: []byte(`{"instrumentation/development":[], "file_format": "1.0"}`), + yamlConfig: []byte("instrumentation/development: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + { + name: "log_level invalid", + jsonConfig: []byte(`{"log_level":[], "file_format": "1.0"}`), + yamlConfig: []byte("log_level: []\nfile_format: 1.0"), + wantErr: newErrUnmarshal(&OpenTelemetryConfiguration{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := OpenTelemetryConfiguration{} + err := got.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, got) + + got = OpenTelemetryConfiguration{} + err = yaml.Unmarshal(tt.yamlConfig, &got) + assert.ErrorIs(t, err, tt.wantErr) + assert.Equal(t, tt.wantType, got) + }) + } +} + +func TestUnmarshalBatchSpanProcessor(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter SpanExporter + }{ + { + name: "valid with null console exporter", + jsonConfig: []byte(`{"exporter":{"console":null}}`), + yamlConfig: []byte("exporter:\n console:\n"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with console exporter", + jsonConfig: []byte(`{"exporter":{"console":{}}}`), + yamlConfig: []byte("exporter:\n console: {}"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with all fields positive", + jsonConfig: 
[]byte(`{"exporter":{"console":{}},"export_timeout":5000,"max_export_batch_size":512,"max_queue_size":2048,"schedule_delay":1000}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: 5000\nmax_export_batch_size: 512\nmax_queue_size: 2048\nschedule_delay: 1000"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with zero export_timeout", + jsonConfig: []byte(`{"exporter":{"console":{}},"export_timeout":0}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: 0"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "valid with zero schedule_delay", + jsonConfig: []byte(`{"exporter":{"console":{}},"schedule_delay":0}`), + yamlConfig: []byte("exporter:\n console: {}\nschedule_delay: 0"), + wantExporter: SpanExporter{Console: ConsoleExporter{}}, + }, + { + name: "missing required exporter field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&BatchSpanProcessor{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: !!str str"), + wantErrT: newErrUnmarshal(&BatchSpanProcessor{}), + }, + { + name: "invalid export_timeout negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"export_timeout":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nexport_timeout: -1"), + wantErrT: newErrGreaterOrEqualZero("export_timeout"), + }, + { + name: "invalid max_export_batch_size zero", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_export_batch_size":0}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_export_batch_size: 0"), + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { + name: "invalid max_export_batch_size negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_export_batch_size":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_export_batch_size: -1"), + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { 
+ name: "invalid max_queue_size zero", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_queue_size":0}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_queue_size: 0"), + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "invalid max_queue_size negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"max_queue_size":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nmax_queue_size: -1"), + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "invalid schedule_delay negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"schedule_delay":-1}`), + yamlConfig: []byte("exporter:\n console: {}\nschedule_delay: -1"), + wantErrT: newErrGreaterOrEqualZero("schedule_delay"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := BatchSpanProcessor{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + + cl = BatchSpanProcessor{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + }) + } +} + +func TestParseYAMLWithEnvironmentVariables(t *testing.T) { + tests := []struct { + name string + input string + wantErr error + wantType any + }{ + { + name: "valid v1.0.0 config with env vars", + input: "v1.0.0_env_var.yaml", + wantType: &v100OpenTelemetryConfigEnvParsing, + }, + } + + t.Setenv("OTEL_SDK_DISABLED", "false") + t.Setenv("OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", "4096") + t.Setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf") + t.Setenv("STRING_VALUE", "value") + t.Setenv("BOOL_VALUE", "true") + t.Setenv("INT_VALUE", "1") + t.Setenv("FLOAT_VALUE", "1.1") + t.Setenv("HEX_VALUE", "0xbeef") // A valid integer value (i.e. 
3735928559) written in hexadecimal + t.Setenv("INVALID_MAP_VALUE", "value\\nkey:value") // An invalid attempt to inject a map key into the YAML + t.Setenv("ENV_VAR_IN_KEY", "env_var_in_key") // An env var in key + t.Setenv("DO_NOT_REPLACE_ME", "Never use this value") // An unused environment variable + t.Setenv("REPLACE_ME", "${DO_NOT_REPLACE_ME}") // A valid replacement text, used verbatim, not replaced with "Never use this value" + t.Setenv("VALUE_WITH_ESCAPE", "value$$") // A value containing an escaped dollar sign ($$), used verbatim + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) + require.NoError(t, err) + + got, err := ParseYAML(b) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr.Error(), err.Error()) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantType, got) + } + }) + } +} + +func TestUnmarshalPeriodicMetricReader(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter PushMetricExporter + }{ + { + name: "valid with null console exporter", + jsonConfig: []byte(`{"exporter":{"console":null}}`), + yamlConfig: []byte("exporter:\n console:\n"), + wantExporter: PushMetricExporter{Console: &ConsoleMetricExporter{}}, + }, + { + name: "valid with console exporter", + jsonConfig: []byte(`{"exporter":{"console":{}}}`), + yamlConfig: []byte("exporter:\n console: {}"), + wantExporter: PushMetricExporter{Console: &ConsoleMetricExporter{}}, + }, + { + name: "valid with all fields positive", + jsonConfig: []byte(`{"exporter":{"console":{}},"timeout":5000,"interval":1000}`), + yamlConfig: []byte("exporter:\n console: {}\ntimeout: 5000\ninterval: 1000"), + wantExporter: PushMetricExporter{Console: &ConsoleMetricExporter{}}, + }, + { + name: "valid with zero timeout", + jsonConfig: []byte(`{"exporter":{"console":{}},"timeout":0}`), + yamlConfig: []byte("exporter:\n console: 
{}\ntimeout: 0"), + wantExporter: PushMetricExporter{Console: &ConsoleMetricExporter{}}, + }, + { + name: "valid with zero interval", + jsonConfig: []byte(`{"exporter":{"console":{}},"interval":0}`), + yamlConfig: []byte("exporter:\n console: {}\ninterval: 0"), + wantExporter: PushMetricExporter{Console: &ConsoleMetricExporter{}}, + }, + { + name: "missing required exporter field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&PeriodicMetricReader{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n console: {}\ntimeout: !!str str"), + wantErrT: newErrUnmarshal(&PeriodicMetricReader{}), + }, + { + name: "invalid timeout negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"timeout":-1}`), + yamlConfig: []byte("exporter:\n console: {}\ntimeout: -1"), + wantErrT: newErrGreaterOrEqualZero("timeout"), + }, + { + name: "invalid interval negative", + jsonConfig: []byte(`{"exporter":{"console":{}},"interval":-1}`), + yamlConfig: []byte("exporter:\n console: {}\ninterval: -1"), + wantErrT: newErrGreaterOrEqualZero("interval"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + pmr := PeriodicMetricReader{} + err := pmr.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, pmr.Exporter) + + pmr = PeriodicMetricReader{} + err = yaml.Unmarshal(tt.yamlConfig, &pmr) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, pmr.Exporter) + }) + } +} + +func TestUnmarshalCardinalityLimits(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + }{ + { + name: "valid with all fields positive", + jsonConfig: []byte(`{"counter":100,"default":200,"gauge":300,"histogram":400,"observable_counter":500,"observable_gauge":600,"observable_up_down_counter":700,"up_down_counter":800}`), + yamlConfig: []byte("counter: 100\ndefault: 200\ngauge: 300\nhistogram: 
400\nobservable_counter: 500\nobservable_gauge: 600\nobservable_up_down_counter: 700\nup_down_counter: 800"), + }, + { + name: "valid with single field", + jsonConfig: []byte(`{"default":2000}`), + yamlConfig: []byte("default: 2000"), + }, + { + name: "valid empty", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("counter: !!str 2000"), + wantErrT: newErrUnmarshal(&CardinalityLimits{}), + }, + { + name: "invalid counter zero", + jsonConfig: []byte(`{"counter":0}`), + yamlConfig: []byte("counter: 0"), + wantErrT: newErrGreaterThanZero("counter"), + }, + { + name: "invalid counter negative", + jsonConfig: []byte(`{"counter":-1}`), + yamlConfig: []byte("counter: -1"), + wantErrT: newErrGreaterThanZero("counter"), + }, + { + name: "invalid default zero", + jsonConfig: []byte(`{"default":0}`), + yamlConfig: []byte("default: 0"), + wantErrT: newErrGreaterThanZero("default"), + }, + { + name: "invalid default negative", + jsonConfig: []byte(`{"default":-1}`), + yamlConfig: []byte("default: -1"), + wantErrT: newErrGreaterThanZero("default"), + }, + { + name: "invalid gauge zero", + jsonConfig: []byte(`{"gauge":0}`), + yamlConfig: []byte("gauge: 0"), + wantErrT: newErrGreaterThanZero("gauge"), + }, + { + name: "invalid gauge negative", + jsonConfig: []byte(`{"gauge":-1}`), + yamlConfig: []byte("gauge: -1"), + wantErrT: newErrGreaterThanZero("gauge"), + }, + { + name: "invalid histogram zero", + jsonConfig: []byte(`{"histogram":0}`), + yamlConfig: []byte("histogram: 0"), + wantErrT: newErrGreaterThanZero("histogram"), + }, + { + name: "invalid histogram negative", + jsonConfig: []byte(`{"histogram":-1}`), + yamlConfig: []byte("histogram: -1"), + wantErrT: newErrGreaterThanZero("histogram"), + }, + { + name: "invalid observable_counter zero", + jsonConfig: []byte(`{"observable_counter":0}`), + yamlConfig: []byte("observable_counter: 0"), + wantErrT: 
newErrGreaterThanZero("observable_counter"), + }, + { + name: "invalid observable_counter negative", + jsonConfig: []byte(`{"observable_counter":-1}`), + yamlConfig: []byte("observable_counter: -1"), + wantErrT: newErrGreaterThanZero("observable_counter"), + }, + { + name: "invalid observable_gauge zero", + jsonConfig: []byte(`{"observable_gauge":0}`), + yamlConfig: []byte("observable_gauge: 0"), + wantErrT: newErrGreaterThanZero("observable_gauge"), + }, + { + name: "invalid observable_gauge negative", + jsonConfig: []byte(`{"observable_gauge":-1}`), + yamlConfig: []byte("observable_gauge: -1"), + wantErrT: newErrGreaterThanZero("observable_gauge"), + }, + { + name: "invalid observable_up_down_counter zero", + jsonConfig: []byte(`{"observable_up_down_counter":0}`), + yamlConfig: []byte("observable_up_down_counter: 0"), + wantErrT: newErrGreaterThanZero("observable_up_down_counter"), + }, + { + name: "invalid observable_up_down_counter negative", + jsonConfig: []byte(`{"observable_up_down_counter":-1}`), + yamlConfig: []byte("observable_up_down_counter: -1"), + wantErrT: newErrGreaterThanZero("observable_up_down_counter"), + }, + { + name: "invalid up_down_counter zero", + jsonConfig: []byte(`{"up_down_counter":0}`), + yamlConfig: []byte("up_down_counter: 0"), + wantErrT: newErrGreaterThanZero("up_down_counter"), + }, + { + name: "invalid up_down_counter negative", + jsonConfig: []byte(`{"up_down_counter":-1}`), + yamlConfig: []byte("up_down_counter: -1"), + wantErrT: newErrGreaterThanZero("up_down_counter"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := CardinalityLimits{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + + cl = CardinalityLimits{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + }) + } +} + +func TestCreateHeadersConfig(t *testing.T) { + tests := []struct { + name string + headers []NameStringValuePair + headersList *string + wantHeaders map[string]string + wantErr error 
+ }{ + { + name: "no headers", + headers: []NameStringValuePair{}, + headersList: nil, + wantHeaders: map[string]string{}, + }, + { + name: "headerslist only", + headers: []NameStringValuePair{}, + headersList: ptr("a=b,c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { + name: "headers only", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + { + Name: "c", + Value: ptr("d"), + }, + }, + headersList: nil, + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { + name: "both headers and headerslist", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + }, + headersList: ptr("c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "d", + }, + }, + { + name: "headers supersedes headerslist", + headers: []NameStringValuePair{ + { + Name: "a", + Value: ptr("b"), + }, + { + Name: "c", + Value: ptr("override"), + }, + }, + headersList: ptr("c=d"), + wantHeaders: map[string]string{ + "a": "b", + "c": "override", + }, + }, + { + name: "invalid headerslist", + headersList: ptr("==="), + wantErr: newErrInvalid("invalid headers_list"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + headersMap, err := createHeadersConfig(tt.headers, tt.headersList) + require.ErrorIs(t, err, tt.wantErr) + require.Equal(t, tt.wantHeaders, headersMap) + }) + } +} + +func TestUnmarshalSpanLimits(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + }{ + { + name: "valid with all fields positive", + jsonConfig: []byte(`{"attribute_count_limit":100,"attribute_value_length_limit":200,"event_attribute_count_limit":300,"event_count_limit":400,"link_attribute_count_limit":500,"link_count_limit":600}`), + yamlConfig: []byte("attribute_count_limit: 100\nattribute_value_length_limit: 200\nevent_attribute_count_limit: 300\nevent_count_limit: 400\nlink_attribute_count_limit: 500\nlink_count_limit: 600"), + }, + { + 
name: "valid with single field", + jsonConfig: []byte(`{"attribute_value_length_limit":2000}`), + yamlConfig: []byte("attribute_value_length_limit: 2000"), + }, + { + name: "valid empty", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("attribute_count_limit: !!str 2000"), + wantErrT: newErrUnmarshal(&SpanLimits{}), + }, + { + name: "invalid attribute_count_limit negative", + jsonConfig: []byte(`{"attribute_count_limit":-1}`), + yamlConfig: []byte("attribute_count_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("attribute_count_limit"), + }, + { + name: "invalid attribute_value_length_limit negative", + jsonConfig: []byte(`{"attribute_value_length_limit":-1}`), + yamlConfig: []byte("attribute_value_length_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("attribute_value_length_limit"), + }, + { + name: "invalid event_attribute_count_limit negative", + jsonConfig: []byte(`{"event_attribute_count_limit":-1}`), + yamlConfig: []byte("event_attribute_count_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("event_attribute_count_limit"), + }, + { + name: "invalid event_count_limit negative", + jsonConfig: []byte(`{"event_count_limit":-1}`), + yamlConfig: []byte("event_count_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("event_count_limit"), + }, + { + name: "invalid link_attribute_count_limit negative", + jsonConfig: []byte(`{"link_attribute_count_limit":-1}`), + yamlConfig: []byte("link_attribute_count_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("link_attribute_count_limit"), + }, + { + name: "invalid link_count_limit negative", + jsonConfig: []byte(`{"link_count_limit":-1}`), + yamlConfig: []byte("link_count_limit: -1"), + wantErrT: newErrGreaterOrEqualZero("link_count_limit"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := SpanLimits{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + + cl = SpanLimits{} + err = 
yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + }) + } +} + +func TestUnmarshalOTLPHttpExporter(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter OTLPHttpExporter + }{ + { + name: "valid with exporter", + jsonConfig: []byte(`{"endpoint":"localhost:4318"}`), + yamlConfig: []byte("endpoint: localhost:4318\n"), + wantExporter: OTLPHttpExporter{Endpoint: ptr("localhost:4318")}, + }, + { + name: "missing required endpoint field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&OTLPHttpExporter{}, "endpoint"), + }, + { + name: "valid with zero timeout", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":0}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: 0"), + wantExporter: OTLPHttpExporter{Endpoint: ptr("localhost:4318"), Timeout: ptr(0)}, + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: !!str str"), + wantErrT: newErrUnmarshal(&OTLPHttpExporter{}), + }, + { + name: "invalid timeout negative", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":-1}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: -1"), + wantErrT: newErrGreaterOrEqualZero("timeout"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := OTLPHttpExporter{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + + cl = OTLPHttpExporter{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + }) + } +} + +func TestUnmarshalOTLPGrpcExporter(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter OTLPGrpcExporter + }{ + { + name: "valid with exporter", + jsonConfig: []byte(`{"endpoint":"localhost:4318"}`), + yamlConfig: []byte("endpoint: 
localhost:4318\n"), + wantExporter: OTLPGrpcExporter{Endpoint: ptr("localhost:4318")}, + }, + { + name: "missing required endpoint field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&OTLPGrpcExporter{}, "endpoint"), + }, + { + name: "valid with zero timeout", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":0}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: 0"), + wantExporter: OTLPGrpcExporter{Endpoint: ptr("localhost:4318"), Timeout: ptr(0)}, + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: !!str str"), + wantErrT: newErrUnmarshal(&OTLPGrpcExporter{}), + }, + { + name: "invalid timeout negative", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":-1}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: -1"), + wantErrT: newErrGreaterOrEqualZero("timeout"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := OTLPGrpcExporter{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + + cl = OTLPGrpcExporter{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + }) + } +} + +func TestUnmarshalOTLPHttpMetricExporter(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter OTLPHttpMetricExporter + }{ + { + name: "valid with exporter", + jsonConfig: []byte(`{"endpoint":"localhost:4318"}`), + yamlConfig: []byte("endpoint: localhost:4318\n"), + wantExporter: OTLPHttpMetricExporter{Endpoint: ptr("localhost:4318")}, + }, + { + name: "missing required endpoint field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&OTLPHttpMetricExporter{}, "endpoint"), + }, + { + name: "valid with zero timeout", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":0}`), + 
yamlConfig: []byte("endpoint: localhost:4318\ntimeout: 0"), + wantExporter: OTLPHttpMetricExporter{Endpoint: ptr("localhost:4318"), Timeout: ptr(0)}, + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: !!str str"), + wantErrT: newErrUnmarshal(&OTLPHttpMetricExporter{}), + }, + { + name: "invalid timeout negative", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":-1}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: -1"), + wantErrT: newErrGreaterOrEqualZero("timeout"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := OTLPHttpMetricExporter{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + + cl = OTLPHttpMetricExporter{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + }) + } +} + +func TestUnmarshalOTLPGrpcMetricExporter(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter OTLPGrpcMetricExporter + }{ + { + name: "valid with exporter", + jsonConfig: []byte(`{"endpoint":"localhost:4318"}`), + yamlConfig: []byte("endpoint: localhost:4318\n"), + wantExporter: OTLPGrpcMetricExporter{Endpoint: ptr("localhost:4318")}, + }, + { + name: "missing required endpoint field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&OTLPGrpcMetricExporter{}, "endpoint"), + }, + { + name: "valid with zero timeout", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":0}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: 0"), + wantExporter: OTLPGrpcMetricExporter{Endpoint: ptr("localhost:4318"), Timeout: ptr(0)}, + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: !!str str"), + wantErrT: newErrUnmarshal(&OTLPGrpcMetricExporter{}), + }, + { + 
name: "invalid timeout negative", + jsonConfig: []byte(`{"endpoint":"localhost:4318", "timeout":-1}`), + yamlConfig: []byte("endpoint: localhost:4318\ntimeout: -1"), + wantErrT: newErrGreaterOrEqualZero("timeout"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := OTLPGrpcMetricExporter{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + + cl = OTLPGrpcMetricExporter{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl) + }) + } +} + +func TestUnmarshalAttributeNameValueType(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantAttributeNameValue AttributeNameValue + }{ + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("name: []\nvalue: true\ntype: bool\n"), + wantErrT: newErrUnmarshal(&AttributeNameValue{}), + }, + { + name: "missing required name field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&AttributeNameValue{}, "name"), + }, + { + name: "missing required value field", + jsonConfig: []byte(`{"name":"test"}`), + yamlConfig: []byte("name: test"), + wantErrT: newErrRequired(&AttributeNameValue{}, "value"), + }, + { + name: "valid string value", + jsonConfig: []byte(`{"name":"test", "value": "test-val", "type": "string"}`), + yamlConfig: []byte("name: test\nvalue: test-val\ntype: string\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: "test-val", + Type: ptr(AttributeTypeString), + }, + }, + { + name: "valid string_array value", + jsonConfig: []byte(`{"name":"test", "value": ["test-val", "test-val-2"], "type": "string_array"}`), + yamlConfig: []byte("name: test\nvalue: [test-val, test-val-2]\ntype: string_array\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: []any{"test-val", "test-val-2"}, + Type: 
ptr(AttributeTypeStringArray), + }, + }, + { + name: "valid bool value", + jsonConfig: []byte(`{"name":"test", "value": true, "type": "bool"}`), + yamlConfig: []byte("name: test\nvalue: true\ntype: bool\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: true, + Type: ptr(AttributeTypeBool), + }, + }, + { + name: "valid string_array value", + jsonConfig: []byte(`{"name":"test", "value": ["test-val", "test-val-2"], "type": "string_array"}`), + yamlConfig: []byte("name: test\nvalue: [test-val, test-val-2]\ntype: string_array\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: []any{"test-val", "test-val-2"}, + Type: ptr(AttributeTypeStringArray), + }, + }, + { + name: "valid int value", + jsonConfig: []byte(`{"name":"test", "value": 1, "type": "int"}`), + yamlConfig: []byte("name: test\nvalue: 1\ntype: int\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: int(1), + Type: ptr(AttributeTypeInt), + }, + }, + { + name: "valid int_array value", + jsonConfig: []byte(`{"name":"test", "value": [1, 2], "type": "int_array"}`), + yamlConfig: []byte("name: test\nvalue: [1, 2]\ntype: int_array\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: []any{1, 2}, + Type: ptr(AttributeTypeIntArray), + }, + }, + { + name: "valid double value", + jsonConfig: []byte(`{"name":"test", "value": 1, "type": "double"}`), + yamlConfig: []byte("name: test\nvalue: 1\ntype: double\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: float64(1), + Type: ptr(AttributeTypeDouble), + }, + }, + { + name: "valid double_array value", + jsonConfig: []byte(`{"name":"test", "value": [1, 2], "type": "double_array"}`), + yamlConfig: []byte("name: test\nvalue: [1.0, 2.0]\ntype: double_array\n"), + wantAttributeNameValue: AttributeNameValue{ + Name: "test", + Value: []any{float64(1), float64(2)}, + Type: ptr(AttributeTypeDoubleArray), + }, + }, + { + name: "invalid type", + jsonConfig: 
[]byte(`{"name":"test", "value": 1, "type": "float"}`), + yamlConfig: []byte("name: test\nvalue: 1\ntype: float\n"), + wantErrT: newErrInvalid("unexpected value type"), + }, + } { + t.Run(tt.name, func(t *testing.T) { + val := AttributeNameValue{} + err := val.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantAttributeNameValue, val) + + val = AttributeNameValue{} + err = yaml.Unmarshal(tt.yamlConfig, &val) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantAttributeNameValue, val) + }) + } +} + +func TestUnmarshalNameStringValuePairType(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantNameStringValuePair NameStringValuePair + }{ + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("name: []\nvalue: true\ntype: bool\n"), + wantErrT: newErrUnmarshal(&NameStringValuePair{}), + }, + { + name: "missing required name field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&NameStringValuePair{}, "name"), + }, + { + name: "missing required value field", + jsonConfig: []byte(`{"name":"test"}`), + yamlConfig: []byte("name: test"), + wantErrT: newErrRequired(&NameStringValuePair{}, "value"), + }, + { + name: "invalid array name", + jsonConfig: []byte(`{"name":[], "value": ["test-val", "test-val-2"], "type": "string_array"}`), + yamlConfig: []byte("name: []\nvalue: [test-val, test-val-2]\ntype: string_array\n"), + wantErrT: newErrUnmarshal(&NameStringValuePair{}), + }, + { + name: "valid string value", + jsonConfig: []byte(`{"name":"test", "value": "test-val", "type": "string"}`), + yamlConfig: []byte("name: test\nvalue: test-val\ntype: string\n"), + wantNameStringValuePair: NameStringValuePair{ + Name: "test", + Value: ptr("test-val"), + }, + }, + { + name: "invalid string_array value", + jsonConfig: []byte(`{"name":"test", "value": ["test-val", "test-val-2"], "type": 
"string_array"}`), + yamlConfig: []byte("name: test\nvalue: [test-val, test-val-2]\ntype: string_array\n"), + wantErrT: newErrUnmarshal(&NameStringValuePair{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + val := NameStringValuePair{} + err := val.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantNameStringValuePair, val) + + val = NameStringValuePair{} + err = yaml.Unmarshal(tt.yamlConfig, &val) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantNameStringValuePair, val) + }) + } +} + +func TestUnmarshalInstrumentType(t *testing.T) { + var instrumentType InstrumentType + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantInstrumentType InstrumentType + }{ + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("name: []\nvalue: true\ntype: bool\n"), + wantErrT: newErrUnmarshal(&instrumentType), + }, + { + name: "invalid instrument type", + jsonConfig: []byte(`"test"`), + yamlConfig: []byte("test"), + wantErrT: newErrInvalid(`invalid selector (expected one of []interface {}{"counter", "gauge", "histogram", "observable_counter", "observable_gauge", "observable_up_down_counter", "up_down_counter"}): "test""`), + }, + { + name: "valid instrument type", + jsonConfig: []byte(`"counter"`), + yamlConfig: []byte("counter"), + wantInstrumentType: InstrumentTypeCounter, + }, + } { + t.Run(tt.name, func(t *testing.T) { + val := InstrumentType("") + err := val.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantInstrumentType, val) + + val = InstrumentType("") + err = yaml.Unmarshal(tt.yamlConfig, &val) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantInstrumentType, val) + }) + } +} + +func TestUnmarshalExperimentalPeerServiceMappingType(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + 
wantExperimentalPeerServiceMapping ExperimentalPeerServiceMapping + }{ + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("peer: []\nservice: true"), + wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), + }, + { + name: "missing required peer field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&ExperimentalPeerServiceMapping{}, "peer"), + }, + { + name: "missing required service field", + jsonConfig: []byte(`{"peer":"test"}`), + yamlConfig: []byte("peer: test"), + wantErrT: newErrRequired(&ExperimentalPeerServiceMapping{}, "service"), + }, + { + name: "invalid string_array peer", + jsonConfig: []byte(`{"peer":[], "service": ["test-val", "test-val-2"], "type": "string_array"}`), + yamlConfig: []byte("peer: []\nservice: [test-val, test-val-2]\ntype: string_array\n"), + wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), + }, + { + name: "valid string service", + jsonConfig: []byte(`{"peer":"test", "service": "test-val"}`), + yamlConfig: []byte("peer: test\nservice: test-val"), + wantExperimentalPeerServiceMapping: ExperimentalPeerServiceMapping{ + Peer: "test", + Service: "test-val", + }, + }, + { + name: "invalid string_array service", + jsonConfig: []byte(`{"peer":"test", "service": ["test-val", "test-val-2"], "type": "string_array"}`), + yamlConfig: []byte("peer: test\nservice: [test-val, test-val-2]\ntype: string_array\n"), + wantErrT: newErrUnmarshal(&ExperimentalPeerServiceMapping{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + val := ExperimentalPeerServiceMapping{} + err := val.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExperimentalPeerServiceMapping, val) + + val = ExperimentalPeerServiceMapping{} + err = yaml.Unmarshal(tt.yamlConfig, &val) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExperimentalPeerServiceMapping, val) + }) + } +} + +func TestUnmarshalExporterDefaultHistogramAggregation(t 
*testing.T) { + var exporterDefaultHistogramAggregation ExporterDefaultHistogramAggregation + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporterDefaultHistogramAggregation ExporterDefaultHistogramAggregation + }{ + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("name: []\nvalue: true\ntype: bool\n"), + wantErrT: newErrUnmarshal(&exporterDefaultHistogramAggregation), + }, + { + name: "invalid histogram aggregation", + jsonConfig: []byte(`"test"`), + yamlConfig: []byte("test"), + wantErrT: newErrInvalid(`invalid histogram aggregation (expected one of []interface {}{"explicit_bucket_histogram", "base2_exponential_bucket_histogram"}): "test""`), + }, + { + name: "valid histogram aggregation", + jsonConfig: []byte(`"base2_exponential_bucket_histogram"`), + yamlConfig: []byte("base2_exponential_bucket_histogram"), + wantExporterDefaultHistogramAggregation: ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram, + }, + } { + t.Run(tt.name, func(t *testing.T) { + val := ExporterDefaultHistogramAggregation("") + err := val.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporterDefaultHistogramAggregation, val) + + val = ExporterDefaultHistogramAggregation("") + err = yaml.Unmarshal(tt.yamlConfig, &val) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporterDefaultHistogramAggregation, val) + }) + } +} + +func TestUnmarshalPullMetricReader(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantExporter PullMetricExporter + }{ + { + name: "valid with prometheus exporter", + jsonConfig: []byte(`{"exporter":{"prometheus/development":{}}}`), + yamlConfig: []byte("exporter:\n prometheus/development: {}"), + wantExporter: PullMetricExporter{PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{}}, + }, + { + name: "missing required 
exporter field", + jsonConfig: []byte(`{}`), + yamlConfig: []byte("{}"), + wantErrT: newErrRequired(&PullMetricReader{}, "exporter"), + }, + { + name: "invalid data", + jsonConfig: []byte(`{:2000}`), + yamlConfig: []byte("exporter:\n prometheus/development: []"), + wantErrT: newErrUnmarshal(&PullMetricReader{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cl := PullMetricReader{} + err := cl.UnmarshalJSON(tt.jsonConfig) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + + cl = PullMetricReader{} + err = yaml.Unmarshal(tt.yamlConfig, &cl) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantExporter, cl.Exporter) + }) + } +} + +func TestUnmarshalResourceJson(t *testing.T) { + for _, tt := range []struct { + name string + yamlConfig []byte + jsonConfig []byte + wantErrT error + wantResource Resource + }{ + { + name: "valid with all detectors", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": null},{"host": null},{"process": null},{"service": null}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - container:\n - host:\n - process:\n - service:"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + { + Container: ExperimentalContainerResourceDetector{}, + }, + { + Host: ExperimentalHostResourceDetector{}, + }, + { + Process: ExperimentalProcessResourceDetector{}, + }, + { + Service: ExperimentalServiceResourceDetector{}, + }, + }, + }, + }, + }, + { + name: "valid non-nil with all detectors", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": {}},{"host": {}},{"process": {}},{"service": {}}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - container: {}\n - host: {}\n - process: {}\n - service: {}"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + { + Container: 
ExperimentalContainerResourceDetector{}, + }, + { + Host: ExperimentalHostResourceDetector{}, + }, + { + Process: ExperimentalProcessResourceDetector{}, + }, + { + Service: ExperimentalServiceResourceDetector{}, + }, + }, + }, + }, + }, + { + name: "invalid container detector", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"container": 1}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - container: 1"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + {}, + }, + }, + }, + wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + }, + { + name: "invalid host detector", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"host": 1}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - host: 1"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + {}, + }, + }, + }, + wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + }, + { + name: "invalid service detector", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"service": 1}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - service: 1"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + {}, + }, + }, + }, + wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + }, + { + name: "invalid process detector", + jsonConfig: []byte(`{"detection/development": {"detectors": [{"process": 1}]}}`), + yamlConfig: []byte("detection/development:\n detectors:\n - process: 1"), + wantResource: Resource{ + DetectionDevelopment: &ExperimentalResourceDetection{ + Detectors: []ExperimentalResourceDetector{ + {}, + }, + }, + }, + wantErrT: newErrUnmarshal(&ExperimentalResourceDetector{}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + r := Resource{} + err := json.Unmarshal(tt.jsonConfig, &r) 
+ assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantResource, r) + + r = Resource{} + err = yaml.Unmarshal(tt.yamlConfig, &r) + assert.ErrorIs(t, err, tt.wantErrT) + assert.Equal(t, tt.wantResource, r) + }) + } +} diff --git a/otelconf/x/config_yaml.go b/otelconf/x/config_yaml.go new file mode 100644 index 00000000000..7dc96394722 --- /dev/null +++ b/otelconf/x/config_yaml.go @@ -0,0 +1,553 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "errors" + "fmt" + "reflect" + + "go.yaml.in/yaml/v3" +) + +// hasYAMLMapKey reports whether the provided mapping node contains the given +// key. It assumes the node is a mapping node and performs a linear scan of its +// key nodes. +func hasYAMLMapKey(node *yaml.Node, key string) bool { + if node == nil || node.Kind != yaml.MappingNode { + return false + } + for i := 0; i+1 < len(node.Content); i += 2 { + if node.Content[i].Kind == yaml.ScalarNode && node.Content[i].Value == key { + return true + } + } + return false +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *ExperimentalResourceDetector) UnmarshalYAML(node *yaml.Node) error { + type Plain ExperimentalResourceDetector + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // container can be nil, must check and set here + if hasYAMLMapKey(node, "container") && plain.Container == nil { + plain.Container = ExperimentalContainerResourceDetector{} + } + // host can be nil, must check and set here + if hasYAMLMapKey(node, "host") && plain.Host == nil { + plain.Host = ExperimentalHostResourceDetector{} + } + // process can be nil, must check and set here + if hasYAMLMapKey(node, "process") && plain.Process == nil { + plain.Process = ExperimentalProcessResourceDetector{} + } + // service can be nil, must check and set here + if hasYAMLMapKey(node, "service") && plain.Service == nil { + plain.Service = ExperimentalServiceResourceDetector{} + } + *j = ExperimentalResourceDetector(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *PushMetricExporter) UnmarshalYAML(node *yaml.Node) error { + type Plain PushMetricExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // console can be nil, must check and set here + if hasYAMLMapKey(node, "console") && plain.Console == nil { + plain.Console = &ConsoleMetricExporter{} + } + *j = PushMetricExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *OpenTelemetryConfiguration) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "file_format") { + return newErrRequired(j, "file_format") + } + type Plain OpenTelemetryConfiguration + type shadow struct { + Plain + LogLevel *SeverityNumber `yaml:"log_level,omitempty"` + AttributeLimits *AttributeLimits `yaml:"attribute_limits,omitempty"` + Disabled *bool `yaml:"disabled,omitempty"` + FileFormat string `yaml:"file_format"` + LoggerProvider *LoggerProvider `yaml:"logger_provider,omitempty"` + MeterProvider *MeterProvider `yaml:"meter_provider,omitempty"` + TracerProvider *TracerProvider `yaml:"tracer_provider,omitempty"` + Propagator *Propagator `yaml:"propagator,omitempty"` + Resource *Resource `yaml:"resource,omitempty"` + InstrumentationDevelopment *ExperimentalInstrumentation `yaml:"instrumentation/development"` + } + var sh shadow + + if err := node.Decode(&sh); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + if sh.AttributeLimits != nil { + sh.Plain.AttributeLimits = sh.AttributeLimits + } + + sh.Plain.FileFormat = sh.FileFormat + if sh.Disabled != nil { + sh.Plain.Disabled = sh.Disabled + } else { + // Configure whether the SDK is disabled. + // If omitted, false is used. + sh.Plain.Disabled = ptr(false) + } + if sh.LoggerProvider != nil { + sh.Plain.LoggerProvider = sh.LoggerProvider + } + if sh.MeterProvider != nil { + sh.Plain.MeterProvider = sh.MeterProvider + } + if sh.TracerProvider != nil { + sh.Plain.TracerProvider = sh.TracerProvider + } + if sh.Propagator != nil { + sh.Plain.Propagator = sh.Propagator + } + if sh.Resource != nil { + sh.Plain.Resource = sh.Resource + } + if sh.InstrumentationDevelopment != nil { + sh.Plain.InstrumentationDevelopment = sh.InstrumentationDevelopment + } + + if sh.LogLevel != nil { + sh.Plain.LogLevel = sh.LogLevel + } else { + // Configure the log level of the internal logger used by the SDK. + // If omitted, info is used. 
+ sh.Plain.LogLevel = ptr(SeverityNumberInfo) + } + + *j = OpenTelemetryConfiguration(sh.Plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *SpanExporter) UnmarshalYAML(node *yaml.Node) error { + type Plain SpanExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // console can be nil, must check and set here + if hasYAMLMapKey(node, "console") && plain.Console == nil { + plain.Console = ConsoleExporter{} + } + *j = SpanExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *LogRecordExporter) UnmarshalYAML(node *yaml.Node) error { + type Plain LogRecordExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // console can be nil, must check and set here + if hasYAMLMapKey(node, "console") && plain.Console == nil { + plain.Console = ConsoleExporter{} + } + *j = LogRecordExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *TextMapPropagator) UnmarshalYAML(node *yaml.Node) error { + type Plain TextMapPropagator + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + // b3 can be nil, must check and set here + if hasYAMLMapKey(node, "b3") && plain.B3 == nil { + plain.B3 = B3Propagator{} + } + // b3multi can be nil, must check and set here + if hasYAMLMapKey(node, "b3multi") && plain.B3Multi == nil { + plain.B3Multi = B3MultiPropagator{} + } + // baggage can be nil, must check and set here + if hasYAMLMapKey(node, "baggage") && plain.Baggage == nil { + plain.Baggage = BaggagePropagator{} + } + // jaeger can be nil, must check and set here + if hasYAMLMapKey(node, "jaeger") && plain.Jaeger == nil { + plain.Jaeger = JaegerPropagator{} + } + // ottrace can be nil, must check and set here + if hasYAMLMapKey(node, "ottrace") && plain.Ottrace == nil { + plain.Ottrace = OpenTracingPropagator{} + } + // tracecontext can be nil, must check and set here + if hasYAMLMapKey(node, "tracecontext") && plain.Tracecontext == nil { + plain.Tracecontext = TraceContextPropagator{} + } + *j = TextMapPropagator(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *BatchLogRecordProcessor) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain BatchLogRecordProcessor + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateBatchLogRecordProcessor((*BatchLogRecordProcessor)(&plain)); err != nil { + return err + } + *j = BatchLogRecordProcessor(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *Sampler) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return err + } + type Plain Sampler + var plain Plain + if err := node.Decode(&plain); err != nil { + return err + } + unmarshalSamplerTypes(raw, (*Sampler)(&plain)) + *j = Sampler(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *MetricProducer) UnmarshalYAML(node *yaml.Node) error { + var raw map[string]any + if err := node.Decode(&raw); err != nil { + return err + } + type Plain MetricProducer + var plain Plain + if err := node.Decode(&plain); err != nil { + return err + } + unmarshalMetricProducer(raw, (*MetricProducer)(&plain)) + *j = MetricProducer(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *BatchSpanProcessor) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain BatchSpanProcessor + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateBatchSpanProcessor((*BatchSpanProcessor)(&plain)); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *PeriodicMetricReader) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain PeriodicMetricReader + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validatePeriodicMetricReader((*PeriodicMetricReader)(&plain)); err != nil { + return err + } + *j = PeriodicMetricReader(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *CardinalityLimits) UnmarshalYAML(node *yaml.Node) error { + type Plain CardinalityLimits + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateCardinalityLimits((*CardinalityLimits)(&plain)); err != nil { + return err + } + *j = CardinalityLimits(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *SpanLimits) UnmarshalYAML(node *yaml.Node) error { + type Plain SpanLimits + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := validateSpanLimits((*SpanLimits)(&plain)); err != nil { + return err + } + *j = SpanLimits(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *OTLPHttpMetricExporter) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "endpoint") { + return newErrRequired(j, "endpoint") + } + type Plain OTLPHttpMetricExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if plain.Timeout != nil && 0 > *plain.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPHttpMetricExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *OTLPGrpcMetricExporter) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "endpoint") { + return newErrRequired(j, "endpoint") + } + type Plain OTLPGrpcMetricExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if plain.Timeout != nil && 0 > *plain.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPGrpcMetricExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *OTLPHttpExporter) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "endpoint") { + return newErrRequired(j, "endpoint") + } + type Plain OTLPHttpExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if plain.Timeout != nil && 0 > *plain.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPHttpExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *OTLPGrpcExporter) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "endpoint") { + return newErrRequired(j, "endpoint") + } + type Plain OTLPGrpcExporter + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if plain.Timeout != nil && 0 > *plain.Timeout { + return newErrGreaterOrEqualZero("timeout") + } + *j = OTLPGrpcExporter(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *AttributeType) UnmarshalYAML(node *yaml.Node) error { + var v string + if err := node.Decode(&v); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + var ok bool + for _, expected := range enumValuesAttributeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return newErrInvalid(fmt.Sprintf("unexpected value type %#v, expected one of %#v)", v, enumValuesAttributeType)) + } + *j = AttributeType(v) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *AttributeNameValue) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "name") { + return newErrRequired(j, "name") + } + if !hasYAMLMapKey(node, "value") { + return newErrRequired(j, "value") + } + type Plain AttributeNameValue + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + // yaml unmarshaller defaults to unmarshalling to int + if plain.Type != nil && *plain.Type == AttributeTypeDouble { + val, ok := plain.Value.(int) + if ok { + plain.Value = float64(val) + } + } + + if plain.Type != nil && *plain.Type == AttributeTypeDoubleArray { + m, ok := plain.Value.([]any) + if ok { + var vals []any + for _, v := range m { + val, ok := v.(int) + if ok { + vals = append(vals, float64(val)) + } else { + vals = append(vals, v) + } + } + plain.Value = vals + } + } + + *j = AttributeNameValue(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *SimpleLogRecordProcessor) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain SimpleLogRecordProcessor + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + *j = SimpleLogRecordProcessor(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + *j = SimpleSpanProcessor(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *NameStringValuePair) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "name") { + return newErrRequired(j, "name") + } + if !hasYAMLMapKey(node, "value") { + return newErrRequired(j, "value") + } + + type Plain NameStringValuePair + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + *j = NameStringValuePair(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *InstrumentType) UnmarshalYAML(node *yaml.Node) error { + type Plain InstrumentType + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := supportedInstrumentType(InstrumentType(plain)); err != nil { + return err + } + + *j = InstrumentType(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *ExperimentalPeerServiceMapping) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "peer") { + return newErrRequired(j, "peer") + } + if !hasYAMLMapKey(node, "service") { + return newErrRequired(j, "service") + } + + type Plain ExperimentalPeerServiceMapping + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + + *j = ExperimentalPeerServiceMapping(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *ExporterDefaultHistogramAggregation) UnmarshalYAML(node *yaml.Node) error { + type Plain ExporterDefaultHistogramAggregation + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + if err := supportedHistogramAggregation(ExporterDefaultHistogramAggregation(plain)); err != nil { + return err + } + + *j = ExporterDefaultHistogramAggregation(plain) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *PullMetricReader) UnmarshalYAML(node *yaml.Node) error { + if !hasYAMLMapKey(node, "exporter") { + return newErrRequired(j, "exporter") + } + type Plain PullMetricReader + var plain Plain + if err := node.Decode(&plain); err != nil { + return errors.Join(newErrUnmarshal(j), err) + } + *j = PullMetricReader(plain) + return nil +} diff --git a/otelconf/x/doc.go b/otelconf/x/doc.go new file mode 100644 index 00000000000..ea077bc31e4 --- /dev/null +++ b/otelconf/x/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x can be used to parse a configuration file that follows +// the JSON Schema defined by the OpenTelemetry Configuration schema. It +// contains the experimental subset of the configuration types; only the +// features that are stable in the configuration schema are available +// from the parent package go.opentelemetry.io/contrib/otelconf. This +// package has no compatibility guarantees and may change at any time. +package x // import "go.opentelemetry.io/contrib/otelconf/x" diff --git a/otelconf/x/example_test.go b/otelconf/x/example_test.go new file mode 100644 index 00000000000..9320f190238 --- /dev/null +++ b/otelconf/x/example_test.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x_test + +import ( + "context" + "log" + "os" + "path/filepath" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/log/global" + + "go.opentelemetry.io/contrib/otelconf" +) + +func Example() { + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.yaml")) + if err != nil { + log.Fatal(err) + } + + // Parse a configuration file into an OpenTelemetryConfiguration model. + c, err := otelconf.ParseYAML(b) + if err != nil { + log.Fatal(err) + } + + // Create SDK components with the parsed configuration. 
+ s, err := otelconf.NewSDK(otelconf.WithOpenTelemetryConfiguration(*c)) + if err != nil { + log.Fatal(err) + } + + // Ensure shutdown is eventually called for all components of the SDK. + defer func() { + if err := s.Shutdown(context.Background()); err != nil { + log.Fatal(err) + } + }() + + // Set the global providers. + otel.SetTracerProvider(s.TracerProvider()) + otel.SetMeterProvider(s.MeterProvider()) + global.SetLoggerProvider(s.LoggerProvider()) + // Set the global propagator. + otel.SetTextMapPropagator(s.Propagator()) +} diff --git a/otelconf/x/fuzz_test.go b/otelconf/x/fuzz_test.go new file mode 100644 index 00000000000..7852ba91130 --- /dev/null +++ b/otelconf/x/fuzz_test.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func FuzzJSON(f *testing.F) { + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.json")) + require.NoError(f, err) + f.Add(b) + + f.Fuzz(func(t *testing.T, data []byte) { + t.Log("JSON:\n" + string(data)) + + var cfg OpenTelemetryConfiguration + err := json.Unmarshal(data, &cfg) + if err != nil { + return + } + + sdk, err := NewSDK(WithOpenTelemetryConfiguration(cfg)) + if err != nil { + return + } + + ctx, cancel := context.WithTimeout(t.Context(), time.Millisecond) + defer cancel() + _ = sdk.Shutdown(ctx) + }) +} + +func FuzzYAML(f *testing.F) { + b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.yaml")) + require.NoError(f, err) + f.Add(b) + + f.Fuzz(func(t *testing.T, data []byte) { + t.Log("YAML:\n" + string(data)) + + cfg, err := ParseYAML(data) + if err != nil { + return + } + + sdk, err := NewSDK(WithOpenTelemetryConfiguration(*cfg)) + if err != nil { + return + } + + ctx, cancel := context.WithTimeout(t.Context(), time.Millisecond) + defer cancel() + _ = sdk.Shutdown(ctx) + }) +} diff --git 
a/otelconf/x/generated_config.go b/otelconf/x/generated_config.go new file mode 100644 index 00000000000..ff1464c23b9 --- /dev/null +++ b/otelconf/x/generated_config.go @@ -0,0 +1,2368 @@ +// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. + +package x + +type Aggregation struct { + // Configures the stream to collect data for the exponential histogram metric + // point, which uses a base-2 exponential formula to determine bucket boundaries + // and an integer scale parameter to control resolution. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation + // for details. + // If omitted, ignore. + // + Base2ExponentialBucketHistogram *Base2ExponentialBucketHistogramAggregation `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` + + // Configures the stream to use the instrument kind to select an aggregation and + // advisory parameters to influence aggregation configuration parameters. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#default-aggregation + // for details. + // If omitted, ignore. + // + Default DefaultAggregation `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` + + // Configures the stream to ignore/drop all instrument measurements. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#drop-aggregation + // for details. + // If omitted, ignore. + // + Drop DropAggregation `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` + + // Configures the stream to collect data for the histogram metric point using a + // set of explicit boundary values for histogram bucketing. 
See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation + // for details + // If omitted, ignore. + // + ExplicitBucketHistogram *ExplicitBucketHistogramAggregation `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` + + // Configures the stream to collect data using the last measurement. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#last-value-aggregation + // for details. + // If omitted, ignore. + // + LastValue LastValueAggregation `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` + + // Configures the stream to collect the arithmetic sum of measurement values. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#sum-aggregation + // for details. + // If omitted, ignore. + // + Sum SumAggregation `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` +} + +type AlwaysOffSampler map[string]interface{} + +type AlwaysOnSampler map[string]interface{} + +type AttributeLimits struct { + // Configure max attribute count. + // Value must be non-negative. + // If omitted or null, 128 is used. + // + AttributeCountLimit AttributeLimitsAttributeCountLimit `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // Configure max attribute value size. + // Value must be non-negative. + // If omitted or null, there is no limit. + // + AttributeValueLengthLimit AttributeLimitsAttributeValueLengthLimit `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` +} + +// Configure max attribute count. +// Value must be non-negative. 
+// If omitted or null, 128 is used. +type AttributeLimitsAttributeCountLimit *int + +// Configure max attribute value size. +// Value must be non-negative. +// If omitted or null, there is no limit. +type AttributeLimitsAttributeValueLengthLimit *int + +type AttributeNameValue struct { + // The attribute name. + // Property is required and must be non-null. + // + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The attribute type. + // Values include: + // * bool: Boolean attribute value. + // * bool_array: Boolean array attribute value. + // * double: Double attribute value. + // * double_array: Double array attribute value. + // * int: Integer attribute value. + // * int_array: Integer array attribute value. + // * string: String attribute value. + // * string_array: String array attribute value. + // If omitted, string is used. + // + Type *AttributeType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + + // The attribute value. + // The type of value must match .type. + // Property is required and must be non-null. + // + Value interface{} `json:"value" yaml:"value" mapstructure:"value"` +} + +type AttributeType string + +const AttributeTypeBool AttributeType = "bool" +const AttributeTypeBoolArray AttributeType = "bool_array" +const AttributeTypeDouble AttributeType = "double" +const AttributeTypeDoubleArray AttributeType = "double_array" +const AttributeTypeInt AttributeType = "int" +const AttributeTypeIntArray AttributeType = "int_array" +const AttributeTypeString AttributeType = "string" +const AttributeTypeStringArray AttributeType = "string_array" + +type B3MultiPropagator map[string]interface{} + +type B3Propagator map[string]interface{} + +type BaggagePropagator map[string]interface{} + +type Base2ExponentialBucketHistogramAggregation struct { + // Configure the max scale factor. + // If omitted or null, 20 is used. 
+ // + MaxScale Base2ExponentialBucketHistogramAggregationMaxScale `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` + + // Configure the maximum number of buckets in each of the positive and negative + // ranges, not counting the special zero bucket. + // If omitted or null, 160 is used. + // + MaxSize Base2ExponentialBucketHistogramAggregationMaxSize `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` + + // Configure whether or not to record min and max. + // If omitted or null, true is used. + // + RecordMinMax Base2ExponentialBucketHistogramAggregationRecordMinMax `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` +} + +// Configure the max scale factor. +// If omitted or null, 20 is used. +type Base2ExponentialBucketHistogramAggregationMaxScale *int + +// Configure the maximum number of buckets in each of the positive and negative +// ranges, not counting the special zero bucket. +// If omitted or null, 160 is used. +type Base2ExponentialBucketHistogramAggregationMaxSize *int + +// Configure whether or not to record min and max. +// If omitted or null, true is used. +type Base2ExponentialBucketHistogramAggregationRecordMinMax *bool + +type BatchLogRecordProcessor struct { + // Configure maximum allowed time (in milliseconds) to export data. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 30000 is used. + // + ExportTimeout BatchLogRecordProcessorExportTimeout `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` + + // Configure exporter. + // Property is required and must be non-null. + // + Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // Configure maximum batch size. Value must be positive. + // If omitted or null, 512 is used. 
+ // + MaxExportBatchSize BatchLogRecordProcessorMaxExportBatchSize `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` + + // Configure maximum queue size. Value must be positive. + // If omitted or null, 2048 is used. + // + MaxQueueSize BatchLogRecordProcessorMaxQueueSize `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` + + // Configure delay interval (in milliseconds) between two consecutive exports. + // Value must be non-negative. + // If omitted or null, 1000 is used. + // + ScheduleDelay BatchLogRecordProcessorScheduleDelay `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` +} + +// Configure maximum allowed time (in milliseconds) to export data. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 30000 is used. +type BatchLogRecordProcessorExportTimeout *int + +// Configure maximum batch size. Value must be positive. +// If omitted or null, 512 is used. +type BatchLogRecordProcessorMaxExportBatchSize *int + +// Configure maximum queue size. Value must be positive. +// If omitted or null, 2048 is used. +type BatchLogRecordProcessorMaxQueueSize *int + +// Configure delay interval (in milliseconds) between two consecutive exports. +// Value must be non-negative. +// If omitted or null, 1000 is used. +type BatchLogRecordProcessorScheduleDelay *int + +type BatchSpanProcessor struct { + // Configure maximum allowed time (in milliseconds) to export data. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 30000 is used. + // + ExportTimeout BatchSpanProcessorExportTimeout `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` + + // Configure exporter. + // Property is required and must be non-null. 
+ // + Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + + // Configure maximum batch size. Value must be positive. + // If omitted or null, 512 is used. + // + MaxExportBatchSize BatchSpanProcessorMaxExportBatchSize `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` + + // Configure maximum queue size. Value must be positive. + // If omitted or null, 2048 is used. + // + MaxQueueSize BatchSpanProcessorMaxQueueSize `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` + + // Configure delay interval (in milliseconds) between two consecutive exports. + // Value must be non-negative. + // If omitted or null, 5000 is used. + // + ScheduleDelay BatchSpanProcessorScheduleDelay `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` +} + +// Configure maximum allowed time (in milliseconds) to export data. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 30000 is used. +type BatchSpanProcessorExportTimeout *int + +// Configure maximum batch size. Value must be positive. +// If omitted or null, 512 is used. +type BatchSpanProcessorMaxExportBatchSize *int + +// Configure maximum queue size. Value must be positive. +// If omitted or null, 2048 is used. +type BatchSpanProcessorMaxQueueSize *int + +// Configure delay interval (in milliseconds) between two consecutive exports. +// Value must be non-negative. +// If omitted or null, 5000 is used. +type BatchSpanProcessorScheduleDelay *int + +type CardinalityLimits struct { + // Configure default cardinality limit for counter instruments. + // If omitted or null, the value from .default is used. 
+ // + Counter CardinalityLimitsCounter `json:"counter,omitempty" yaml:"counter,omitempty" mapstructure:"counter,omitempty"` + + // Configure default cardinality limit for all instrument types. + // Instrument-specific cardinality limits take priority. + // If omitted or null, 2000 is used. + // + Default CardinalityLimitsDefault `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` + + // Configure default cardinality limit for gauge instruments. + // If omitted or null, the value from .default is used. + // + Gauge CardinalityLimitsGauge `json:"gauge,omitempty" yaml:"gauge,omitempty" mapstructure:"gauge,omitempty"` + + // Configure default cardinality limit for histogram instruments. + // If omitted or null, the value from .default is used. + // + Histogram CardinalityLimitsHistogram `json:"histogram,omitempty" yaml:"histogram,omitempty" mapstructure:"histogram,omitempty"` + + // Configure default cardinality limit for observable_counter instruments. + // If omitted or null, the value from .default is used. + // + ObservableCounter CardinalityLimitsObservableCounter `json:"observable_counter,omitempty" yaml:"observable_counter,omitempty" mapstructure:"observable_counter,omitempty"` + + // Configure default cardinality limit for observable_gauge instruments. + // If omitted or null, the value from .default is used. + // + ObservableGauge CardinalityLimitsObservableGauge `json:"observable_gauge,omitempty" yaml:"observable_gauge,omitempty" mapstructure:"observable_gauge,omitempty"` + + // Configure default cardinality limit for observable_up_down_counter instruments. + // If omitted or null, the value from .default is used. + // + ObservableUpDownCounter CardinalityLimitsObservableUpDownCounter `json:"observable_up_down_counter,omitempty" yaml:"observable_up_down_counter,omitempty" mapstructure:"observable_up_down_counter,omitempty"` + + // Configure default cardinality limit for up_down_counter instruments. 
+ // If omitted or null, the value from .default is used. + // + UpDownCounter CardinalityLimitsUpDownCounter `json:"up_down_counter,omitempty" yaml:"up_down_counter,omitempty" mapstructure:"up_down_counter,omitempty"` +} + +// Configure default cardinality limit for counter instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsCounter *int + +// Configure default cardinality limit for all instrument types. +// Instrument-specific cardinality limits take priority. +// If omitted or null, 2000 is used. +type CardinalityLimitsDefault *int + +// Configure default cardinality limit for gauge instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsGauge *int + +// Configure default cardinality limit for histogram instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsHistogram *int + +// Configure default cardinality limit for observable_counter instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsObservableCounter *int + +// Configure default cardinality limit for observable_gauge instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsObservableGauge *int + +// Configure default cardinality limit for observable_up_down_counter instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsObservableUpDownCounter *int + +// Configure default cardinality limit for up_down_counter instruments. +// If omitted or null, the value from .default is used. +type CardinalityLimitsUpDownCounter *int + +type ConsoleExporter map[string]interface{} + +type ConsoleMetricExporter struct { + // Configure default histogram aggregation. + // Values include: + // * base2_exponential_bucket_histogram: Use base2 exponential histogram as the + // default aggregation for histogram instruments. 
+ // * explicit_bucket_histogram: Use explicit bucket histogram as the default + // aggregation for histogram instruments. + // If omitted, explicit_bucket_histogram is used. + // + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Configure temporality preference. + // Values include: + // * cumulative: Use cumulative aggregation temporality for all instrument types. + // * delta: Use delta aggregation for all instrument types except up down counter + // and asynchronous up down counter. + // * low_memory: Use delta aggregation temporality for counter and histogram + // instrument types. Use cumulative aggregation temporality for all other + // instrument types. + // If omitted, cumulative is used. + // + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` +} + +type DefaultAggregation map[string]interface{} + +type Distribution map[string]map[string]interface{} + +type DropAggregation map[string]interface{} + +type ExemplarFilter string + +const ExemplarFilterAlwaysOff ExemplarFilter = "always_off" +const ExemplarFilterAlwaysOn ExemplarFilter = "always_on" +const ExemplarFilterTraceBased ExemplarFilter = "trace_based" + +type ExperimentalComposableAlwaysOffSampler map[string]interface{} + +type ExperimentalComposableAlwaysOnSampler map[string]interface{} + +type ExperimentalComposableParentThresholdSampler struct { + // Sampler to use when there is no parent. + // Property is required and must be non-null. + // + Root ExperimentalComposableSampler `json:"root" yaml:"root" mapstructure:"root"` +} + +type ExperimentalComposableProbabilitySampler struct { + // Configure ratio. + // If omitted or null, 1.0 is used. 
+ // + Ratio ExperimentalComposableProbabilitySamplerRatio `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` +} + +// Configure ratio. +// If omitted or null, 1.0 is used. +type ExperimentalComposableProbabilitySamplerRatio *float64 + +type ExperimentalComposableRuleBasedSampler struct { + // The rules for the sampler, matched in order. If no rules match, the span is not + // sampled. + // If omitted or null, no span is sampled. + // + Rules *ExperimentalComposableRuleBasedSamplerRules `json:"rules,omitempty" yaml:"rules,omitempty" mapstructure:"rules,omitempty"` +} + +// A rule for ExperimentalComposableRuleBasedSampler. A rule can have multiple +// match conditions - the sampler will be applied if all match. +// If no conditions are specified, the rule matches all spans that reach it. +type ExperimentalComposableRuleBasedSamplerRule struct { + // Patterns to match against a single attribute. Non-string attributes are matched + // using their string representation: + // for example, a pattern of "4*" would match any http.response.status_code in + // 400-499. For array attributes, if any + // item matches, it is considered a match. + // If omitted, ignore. + // + AttributePatterns *ExperimentalComposableRuleBasedSamplerRuleAttributePatterns `json:"attribute_patterns,omitempty" yaml:"attribute_patterns,omitempty" mapstructure:"attribute_patterns,omitempty"` + + // Values to match against a single attribute. Non-string attributes are matched + // using their string representation: + // for example, a value of "404" would match the http.response.status_code 404. + // For array attributes, if any + // item matches, it is considered a match. + // If omitted, ignore. + // + AttributeValues *ExperimentalComposableRuleBasedSamplerRuleAttributeValues `json:"attribute_values,omitempty" yaml:"attribute_values,omitempty" mapstructure:"attribute_values,omitempty"` + + // The parent span types to match. 
+ // Values include: + // * local: local, a local parent. + // * none: none, no parent, i.e., the trace root. + // * remote: remote, a remote parent. + // If omitted, ignore. + // + Parent []ExperimentalSpanParent `json:"parent,omitempty" yaml:"parent,omitempty" mapstructure:"parent,omitempty"` + + // The sampler to use for matching spans. + // Property is required and must be non-null. + // + Sampler ExperimentalComposableSampler `json:"sampler" yaml:"sampler" mapstructure:"sampler"` + + // The span kinds to match. If the span's kind matches any of these, it matches. + // Values include: + // * client: client, a client span. + // * consumer: consumer, a consumer span. + // * internal: internal, an internal span. + // * producer: producer, a producer span. + // * server: server, a server span. + // If omitted, ignore. + // + SpanKinds []SpanKind `json:"span_kinds,omitempty" yaml:"span_kinds,omitempty" mapstructure:"span_kinds,omitempty"` +} + +type ExperimentalComposableRuleBasedSamplerRuleAttributePatterns struct { + // Configure list of value patterns to exclude. Applies after .included (i.e. + // excluded has higher priority than included). + // Values are evaluated to match as follows: + // * If the value exactly matches. + // * If the value matches the wildcard pattern, where '?' matches any single + // character and '*' matches any number of characters including none. + // If omitted, .included attributes are included. + // + Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` + + // Configure list of value patterns to include. + // Values are evaluated to match as follows: + // * If the value exactly matches. + // * If the value matches the wildcard pattern, where '?' matches any single + // character and '*' matches any number of characters including none. + // If omitted, all values are included. 
+ // + Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` + + // The attribute key to match against. + // Property is required and must be non-null. + // + Key string `json:"key" yaml:"key" mapstructure:"key"` +} + +type ExperimentalComposableRuleBasedSamplerRuleAttributeValues struct { + // The attribute key to match against. + // Property is required and must be non-null. + // + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // The attribute values to match against. If the attribute's value matches any of + // these, it matches. + // Property is required and must be non-null. + // + Values []string `json:"values" yaml:"values" mapstructure:"values"` +} + +// The rules for the sampler, matched in order. If no rules match, the span is not +// sampled. +// If omitted or null, no span is sampled. +type ExperimentalComposableRuleBasedSamplerRules []ExperimentalComposableRuleBasedSamplerRule + +type ExperimentalComposableSampler struct { + // Configure sampler to be always_off. + // If omitted, ignore. + // + AlwaysOff ExperimentalComposableAlwaysOffSampler `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` + + // Configure sampler to be always_on. + // If omitted, ignore. + // + AlwaysOn ExperimentalComposableAlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` + + // Configure sampler to be parent_threshold. + // If omitted, ignore. + // + ParentThreshold *ExperimentalComposableParentThresholdSampler `json:"parent_threshold,omitempty" yaml:"parent_threshold,omitempty" mapstructure:"parent_threshold,omitempty"` + + // Configure sampler to be probability. + // If omitted, ignore. + // + Probability *ExperimentalComposableProbabilitySampler `json:"probability,omitempty" yaml:"probability,omitempty" mapstructure:"probability,omitempty"` + + // Configure sampler to be rule_based. + // If omitted, ignore. 
+ // + RuleBased *ExperimentalComposableRuleBasedSampler `json:"rule_based,omitempty" yaml:"rule_based,omitempty" mapstructure:"rule_based,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type ExperimentalContainerResourceDetector map[string]interface{} + +type ExperimentalGeneralInstrumentation struct { + // Configure instrumentations following the http semantic conventions. + // See http semantic conventions: + // https://opentelemetry.io/docs/specs/semconv/http/ + // If omitted, defaults as described in ExperimentalHttpInstrumentation are used. + // + Http *ExperimentalHttpInstrumentation `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + + // Configure instrumentations following the peer semantic conventions. + // See peer semantic conventions: + // https://opentelemetry.io/docs/specs/semconv/attributes-registry/peer/ + // If omitted, defaults as described in ExperimentalPeerInstrumentation are used. + // + Peer *ExperimentalPeerInstrumentation `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` +} + +type ExperimentalHostResourceDetector map[string]interface{} + +type ExperimentalHttpClientInstrumentation struct { + // Configure headers to capture for outbound http requests. + // If omitted, no outbound request headers are captured. + // + RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` + + // Configure headers to capture for inbound http responses. + // If omitted, no inbound response headers are captured. + // + ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` +} + +type ExperimentalHttpInstrumentation struct { + // Configure instrumentations following the http client semantic conventions. 
+ // If omitted, defaults as described in ExperimentalHttpClientInstrumentation are + // used. + // + Client *ExperimentalHttpClientInstrumentation `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` + + // Configure instrumentations following the http server semantic conventions. + // If omitted, defaults as described in ExperimentalHttpServerInstrumentation are + // used. + // + Server *ExperimentalHttpServerInstrumentation `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` +} + +type ExperimentalHttpServerInstrumentation struct { + // Configure headers to capture for inbound http requests. + // If omitted, no request headers are captured. + // + RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` + + // Configure headers to capture for outbound http responses. + // If omitted, no response headers are captures. + // + ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` +} + +type ExperimentalInstrumentation struct { + // Configure C++ language-specific instrumentation libraries. + // If omitted, instrumentation defaults are used. + // + Cpp ExperimentalLanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` + + // Configure .NET language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Dotnet ExperimentalLanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` + + // Configure Erlang language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. 
The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Erlang ExperimentalLanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` + + // Configure general SemConv options that may apply to multiple languages and + // instrumentations. + // Instrumenation may merge general config options with the language specific + // configuration at .instrumentation.. + // If omitted, default values as described in ExperimentalGeneralInstrumentation + // are used. + // + General *ExperimentalGeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` + + // Configure Go language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Go ExperimentalLanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` + + // Configure Java language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Java ExperimentalLanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` + + // Configure JavaScript language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Js ExperimentalLanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` + + // Configure PHP language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. 
+ // If omitted, instrumentation defaults are used. + // + Php ExperimentalLanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` + + // Configure Python language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Python ExperimentalLanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` + + // Configure Ruby language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Ruby ExperimentalLanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` + + // Configure Rust language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Rust ExperimentalLanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` + + // Configure Swift language-specific instrumentation libraries. + // Each entry's key identifies a particular instrumentation library. The + // corresponding value configures it. + // If omitted, instrumentation defaults are used. + // + Swift ExperimentalLanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` +} + +type ExperimentalJaegerRemoteSampler struct { + // Configure the endpoint of the jaeger remote sampling service. + // Property is required and must be non-null. + // + Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + + // Configure the initial sampler used before first configuration is fetched. 
+ // Property is required and must be non-null. + // + InitialSampler Sampler `json:"initial_sampler" yaml:"initial_sampler" mapstructure:"initial_sampler"` + + // Configure the polling interval (in milliseconds) to fetch from the remote + // sampling service. + // If omitted or null, 60000 is used. + // + Interval ExperimentalJaegerRemoteSamplerInterval `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` +} + +// Configure the polling interval (in milliseconds) to fetch from the remote +// sampling service. +// If omitted or null, 60000 is used. +type ExperimentalJaegerRemoteSamplerInterval *int + +type ExperimentalLanguageSpecificInstrumentation map[string]map[string]interface{} + +type ExperimentalLoggerConfig struct { + // Configure if the logger is enabled or not. + // If omitted or null, false is used. + // + Disabled ExperimentalLoggerConfigDisabled `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` + + // Configure severity filtering. + // Log records with an non-zero (i.e. unspecified) severity number which is less + // than minimum_severity are not processed. + // Values include: + // * debug: debug, severity number 5. + // * debug2: debug2, severity number 6. + // * debug3: debug3, severity number 7. + // * debug4: debug4, severity number 8. + // * error: error, severity number 17. + // * error2: error2, severity number 18. + // * error3: error3, severity number 19. + // * error4: error4, severity number 20. + // * fatal: fatal, severity number 21. + // * fatal2: fatal2, severity number 22. + // * fatal3: fatal3, severity number 23. + // * fatal4: fatal4, severity number 24. + // * info: info, severity number 9. + // * info2: info2, severity number 10. + // * info3: info3, severity number 11. + // * info4: info4, severity number 12. + // * trace: trace, severity number 1. + // * trace2: trace2, severity number 2. + // * trace3: trace3, severity number 3. 
+ // * trace4: trace4, severity number 4. + // * warn: warn, severity number 13. + // * warn2: warn2, severity number 14. + // * warn3: warn3, severity number 15. + // * warn4: warn4, severity number 16. + // If omitted, severity filtering is not applied. + // + MinimumSeverity *SeverityNumber `json:"minimum_severity,omitempty" yaml:"minimum_severity,omitempty" mapstructure:"minimum_severity,omitempty"` + + // Configure trace based filtering. + // If true, log records associated with unsampled trace contexts traces are not + // processed. If false, or if a log record is not associated with a trace context, + // trace based filtering is not applied. + // If omitted or null, trace based filtering is not applied. + // + TraceBased ExperimentalLoggerConfigTraceBased `json:"trace_based,omitempty" yaml:"trace_based,omitempty" mapstructure:"trace_based,omitempty"` +} + +// Configure if the logger is enabled or not. +// If omitted or null, false is used. +type ExperimentalLoggerConfigDisabled *bool + +// Configure trace based filtering. +// If true, log records associated with unsampled trace contexts traces are not +// processed. If false, or if a log record is not associated with a trace context, +// trace based filtering is not applied. +// If omitted or null, trace based filtering is not applied. +type ExperimentalLoggerConfigTraceBased *bool + +type ExperimentalLoggerConfigurator struct { + // Configure the default logger config used there is no matching entry in + // .logger_configurator/development.loggers. + // If omitted, unmatched .loggers use default values as described in + // ExperimentalLoggerConfig. + // + DefaultConfig *ExperimentalLoggerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + + // Configure loggers. + // If omitted, all loggers use .default_config. 
+ // + Loggers []ExperimentalLoggerMatcherAndConfig `json:"loggers,omitempty" yaml:"loggers,omitempty" mapstructure:"loggers,omitempty"` +} + +type ExperimentalLoggerMatcherAndConfig struct { + // The logger config. + // Property is required and must be non-null. + // + Config ExperimentalLoggerConfig `json:"config" yaml:"config" mapstructure:"config"` + + // Configure logger names to match, evaluated as follows: + // + // * If the logger name exactly matches. + // * If the logger name matches the wildcard pattern, where '?' matches any + // single character and '*' matches any number of characters including none. + // Property is required and must be non-null. + // + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type ExperimentalMeterConfig struct { + // Configure if the meter is enabled or not. + // If omitted, false is used. + // + Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` +} + +type ExperimentalMeterConfigurator struct { + // Configure the default meter config used there is no matching entry in + // .meter_configurator/development.meters. + // If omitted, unmatched .meters use default values as described in + // ExperimentalMeterConfig. + // + DefaultConfig *ExperimentalMeterConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + + // Configure meters. + // If omitted, all meters used .default_config. + // + Meters []ExperimentalMeterMatcherAndConfig `json:"meters,omitempty" yaml:"meters,omitempty" mapstructure:"meters,omitempty"` +} + +type ExperimentalMeterMatcherAndConfig struct { + // The meter config. + // Property is required and must be non-null. + // + Config ExperimentalMeterConfig `json:"config" yaml:"config" mapstructure:"config"` + + // Configure meter names to match, evaluated as follows: + // + // * If the meter name exactly matches. + // * If the meter name matches the wildcard pattern, where '?' 
matches any single + // character and '*' matches any number of characters including none. + // Property is required and must be non-null. + // + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type ExperimentalOTLPFileExporter struct { + // Configure output stream. + // Values include stdout, or scheme+destination. For example: + // file:///path/to/file.jsonl. + // If omitted or null, stdout is used. + // + OutputStream ExperimentalOTLPFileExporterOutputStream `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"` +} + +// Configure output stream. +// Values include stdout, or scheme+destination. For example: +// file:///path/to/file.jsonl. +// If omitted or null, stdout is used. +type ExperimentalOTLPFileExporterOutputStream *string + +type ExperimentalOTLPFileMetricExporter struct { + // Configure default histogram aggregation. + // Values include: + // * base2_exponential_bucket_histogram: Use base2 exponential histogram as the + // default aggregation for histogram instruments. + // * explicit_bucket_histogram: Use explicit bucket histogram as the default + // aggregation for histogram instruments. + // If omitted, explicit_bucket_histogram is used. + // + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Configure output stream. + // Values include stdout, or scheme+destination. For example: + // file:///path/to/file.jsonl. + // If omitted or null, stdout is used. + // + OutputStream ExperimentalOTLPFileMetricExporterOutputStream `json:"output_stream,omitempty" yaml:"output_stream,omitempty" mapstructure:"output_stream,omitempty"` + + // Configure temporality preference. + // Values include: + // * cumulative: Use cumulative aggregation temporality for all instrument types. 
+ // * delta: Use delta aggregation for all instrument types except up down counter + // and asynchronous up down counter. + // * low_memory: Use delta aggregation temporality for counter and histogram + // instrument types. Use cumulative aggregation temporality for all other + // instrument types. + // If omitted, cumulative is used. + // + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` +} + +// Configure output stream. +// Values include stdout, or scheme+destination. For example: +// file:///path/to/file.jsonl. +// If omitted or null, stdout is used. +type ExperimentalOTLPFileMetricExporterOutputStream *string + +type ExperimentalPeerInstrumentation struct { + // Configure the service mapping for instrumentations following peer.service + // semantic conventions. + // See peer.service semantic conventions: + // https://opentelemetry.io/docs/specs/semconv/general/attributes/#general-remote-service-attributes + // If omitted, no peer service mappings are used. + // + ServiceMapping []ExperimentalPeerServiceMapping `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` +} + +type ExperimentalPeerServiceMapping struct { + // The IP address to map. + // Property is required and must be non-null. + // + Peer string `json:"peer" yaml:"peer" mapstructure:"peer"` + + // The logical name corresponding to the IP address of .peer. + // Property is required and must be non-null. + // + Service string `json:"service" yaml:"service" mapstructure:"service"` +} + +type ExperimentalProbabilitySampler struct { + // Configure ratio. + // If omitted or null, 1.0 is used. + // + Ratio ExperimentalProbabilitySamplerRatio `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` +} + +// Configure ratio. +// If omitted or null, 1.0 is used. 
+type ExperimentalProbabilitySamplerRatio *float64 + +type ExperimentalProcessResourceDetector map[string]interface{} + +type ExperimentalPrometheusMetricExporter struct { + // Configure host. + // If omitted or null, localhost is used. + // + Host ExperimentalPrometheusMetricExporterHost `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Configure port. + // If omitted or null, 9464 is used. + // + Port ExperimentalPrometheusMetricExporterPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` + + // Configure how metric names are translated to Prometheus metric names. + // Values include: + // * no_translation: Special character escaping is disabled. Type and unit + // suffixes are disabled. Metric names are unaltered. + // * no_utf8_escaping_with_suffixes: Special character escaping is disabled. Type + // and unit suffixes are enabled. + // * underscore_escaping_with_suffixes: Special character escaping is enabled. + // Type and unit suffixes are enabled. + // * underscore_escaping_without_suffixes: Special character escaping is enabled. + // Type and unit suffixes are disabled. This represents classic Prometheus metric + // name compatibility. + // If omitted, underscore_escaping_with_suffixes is used. + // + TranslationStrategy *ExperimentalPrometheusTranslationStrategy `json:"translation_strategy,omitempty" yaml:"translation_strategy,omitempty" mapstructure:"translation_strategy,omitempty"` + + // Configure Prometheus Exporter to add resource attributes as metrics attributes, + // where the resource attribute keys match the patterns. + // If omitted, no resource attributes are added. + // + WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` + + // Configure Prometheus Exporter to produce metrics without a scope info metric. + // If omitted or null, false is used. 
+ // + WithoutScopeInfo ExperimentalPrometheusMetricExporterWithoutScopeInfo `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` + + // Configure Prometheus Exporter to produce metrics without a target info metric + // for the resource. + // If omitted or null, false is used. + // + WithoutTargetInfo ExperimentalPrometheusMetricExporterWithoutTargetInfo `json:"without_target_info,omitempty" yaml:"without_target_info,omitempty" mapstructure:"without_target_info,omitempty"` +} + +// Configure host. +// If omitted or null, localhost is used. +type ExperimentalPrometheusMetricExporterHost *string + +// Configure port. +// If omitted or null, 9464 is used. +type ExperimentalPrometheusMetricExporterPort *int + +// Configure Prometheus Exporter to produce metrics without a scope info metric. +// If omitted or null, false is used. +type ExperimentalPrometheusMetricExporterWithoutScopeInfo *bool + +// Configure Prometheus Exporter to produce metrics without a target info metric +// for the resource. +// If omitted or null, false is used. +type ExperimentalPrometheusMetricExporterWithoutTargetInfo *bool + +type ExperimentalPrometheusTranslationStrategy string + +const ExperimentalPrometheusTranslationStrategyNoTranslation ExperimentalPrometheusTranslationStrategy = "no_translation" +const ExperimentalPrometheusTranslationStrategyNoUtf8EscapingWithSuffixes ExperimentalPrometheusTranslationStrategy = "no_utf8_escaping_with_suffixes" +const ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes ExperimentalPrometheusTranslationStrategy = "underscore_escaping_with_suffixes" +const ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes ExperimentalPrometheusTranslationStrategy = "underscore_escaping_without_suffixes" + +type ExperimentalResourceDetection struct { + // Configure attributes provided by resource detectors. 
+ // If omitted, all attributes from resource detectors are added. + // + Attributes *IncludeExclude `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` + + // Configure resource detectors. + // Resource detector names are dependent on the SDK language ecosystem. Please + // consult documentation for each respective language. + // If omitted, no resource detectors are enabled. + // + Detectors []ExperimentalResourceDetector `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` +} + +type ExperimentalResourceDetector struct { + // Enable the container resource detector, which populates container.* attributes. + // If omitted, ignore. + // + Container ExperimentalContainerResourceDetector `json:"container,omitempty" yaml:"container,omitempty" mapstructure:"container,omitempty"` + + // Enable the host resource detector, which populates host.* and os.* attributes. + // If omitted, ignore. + // + Host ExperimentalHostResourceDetector `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Enable the process resource detector, which populates process.* attributes. + // If omitted, ignore. + // + Process ExperimentalProcessResourceDetector `json:"process,omitempty" yaml:"process,omitempty" mapstructure:"process,omitempty"` + + // Enable the service detector, which populates service.name based on the + // OTEL_SERVICE_NAME environment variable and service.instance.id. + // If omitted, ignore. 
+ // + Service ExperimentalServiceResourceDetector `json:"service,omitempty" yaml:"service,omitempty" mapstructure:"service,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type ExperimentalServiceResourceDetector map[string]interface{} + +type ExperimentalSpanParent string + +const ExperimentalSpanParentLocal ExperimentalSpanParent = "local" +const ExperimentalSpanParentNone ExperimentalSpanParent = "none" +const ExperimentalSpanParentRemote ExperimentalSpanParent = "remote" + +type ExperimentalTracerConfig struct { + // Configure if the tracer is enabled or not. + // If omitted, false is used. + // + Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` +} + +type ExperimentalTracerConfigurator struct { + // Configure the default tracer config used when there is no matching entry in + // .tracer_configurator/development.tracers. + // If omitted, unmatched .tracers use default values as described in + // ExperimentalTracerConfig. + // + DefaultConfig *ExperimentalTracerConfig `json:"default_config,omitempty" yaml:"default_config,omitempty" mapstructure:"default_config,omitempty"` + + // Configure tracers. + // If omitted, all tracers use .default_config. + // + Tracers []ExperimentalTracerMatcherAndConfig `json:"tracers,omitempty" yaml:"tracers,omitempty" mapstructure:"tracers,omitempty"` +} + +type ExperimentalTracerMatcherAndConfig struct { + // The tracer config. + // Property is required and must be non-null. + // + Config ExperimentalTracerConfig `json:"config" yaml:"config" mapstructure:"config"` + + // Configure tracer names to match, evaluated as follows: + // + // * If the tracer name exactly matches. + // * If the tracer name matches the wildcard pattern, where '?' matches any + // single character and '*' matches any number of characters including none. + // Property is required and must be non-null. 
+ // + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type ExplicitBucketHistogramAggregation struct { + // Configure bucket boundaries. + // If omitted, [0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, + // 10000] is used. + // + Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"` + + // Configure record min and max. + // If omitted or null, true is used. + // + RecordMinMax ExplicitBucketHistogramAggregationRecordMinMax `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` +} + +// Configure record min and max. +// If omitted or null, true is used. +type ExplicitBucketHistogramAggregationRecordMinMax *bool + +type ExporterDefaultHistogramAggregation string + +const ExporterDefaultHistogramAggregationBase2ExponentialBucketHistogram ExporterDefaultHistogramAggregation = "base2_exponential_bucket_histogram" +const ExporterDefaultHistogramAggregationExplicitBucketHistogram ExporterDefaultHistogramAggregation = "explicit_bucket_histogram" + +type ExporterTemporalityPreference string + +const ExporterTemporalityPreferenceCumulative ExporterTemporalityPreference = "cumulative" +const ExporterTemporalityPreferenceDelta ExporterTemporalityPreference = "delta" +const ExporterTemporalityPreferenceLowMemory ExporterTemporalityPreference = "low_memory" + +type GrpcTls struct { + // Configure certificate used to verify a server's TLS credentials. + // Absolute path to certificate file in PEM format. + // If omitted or null, system default certificate verification is used for secure + // connections. + // + CaFile GrpcTlsCaFile `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` + + // Configure mTLS client certificate. + // Absolute path to client certificate file in PEM format. If set, .client_key + // must also be set. + // If omitted or null, mTLS is not used. 
+ // + CertFile GrpcTlsCertFile `json:"cert_file,omitempty" yaml:"cert_file,omitempty" mapstructure:"cert_file,omitempty"` + + // Configure client transport security for the exporter's connection. + // Only applicable when .endpoint is provided without http or https scheme. + // Implementations may choose to ignore .insecure. + // If omitted or null, false is used. + // + Insecure GrpcTlsInsecure `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // Configure mTLS private client key. + // Absolute path to client key file in PEM format. If set, .client_certificate + // must also be set. + // If omitted or null, mTLS is not used. + // + KeyFile GrpcTlsKeyFile `json:"key_file,omitempty" yaml:"key_file,omitempty" mapstructure:"key_file,omitempty"` +} + +// Configure certificate used to verify a server's TLS credentials. +// Absolute path to certificate file in PEM format. +// If omitted or null, system default certificate verification is used for secure +// connections. +type GrpcTlsCaFile *string + +// Configure mTLS client certificate. +// Absolute path to client certificate file in PEM format. If set, .client_key must +// also be set. +// If omitted or null, mTLS is not used. +type GrpcTlsCertFile *string + +// Configure client transport security for the exporter's connection. +// Only applicable when .endpoint is provided without http or https scheme. +// Implementations may choose to ignore .insecure. +// If omitted or null, false is used. +type GrpcTlsInsecure *bool + +// Configure mTLS private client key. +// Absolute path to client key file in PEM format. If set, .client_certificate must +// also be set. +// If omitted or null, mTLS is not used. +type GrpcTlsKeyFile *string + +type HttpTls struct { + // Configure certificate used to verify a server's TLS credentials. + // Absolute path to certificate file in PEM format. + // If omitted or null, system default certificate verification is used for secure + // connections. 
+ // + CaFile HttpTlsCaFile `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` + + // Configure mTLS client certificate. + // Absolute path to client certificate file in PEM format. If set, .client_key + // must also be set. + // If omitted or null, mTLS is not used. + // + CertFile HttpTlsCertFile `json:"cert_file,omitempty" yaml:"cert_file,omitempty" mapstructure:"cert_file,omitempty"` + + // Configure mTLS private client key. + // Absolute path to client key file in PEM format. If set, .client_certificate + // must also be set. + // If omitted or null, mTLS is not used. + // + KeyFile HttpTlsKeyFile `json:"key_file,omitempty" yaml:"key_file,omitempty" mapstructure:"key_file,omitempty"` +} + +// Configure certificate used to verify a server's TLS credentials. +// Absolute path to certificate file in PEM format. +// If omitted or null, system default certificate verification is used for secure +// connections. +type HttpTlsCaFile *string + +// Configure mTLS client certificate. +// Absolute path to client certificate file in PEM format. If set, .client_key must +// also be set. +// If omitted or null, mTLS is not used. +type HttpTlsCertFile *string + +// Configure mTLS private client key. +// Absolute path to client key file in PEM format. If set, .client_certificate must +// also be set. +// If omitted or null, mTLS is not used. +type HttpTlsKeyFile *string + +type IncludeExclude struct { + // Configure list of value patterns to exclude. Applies after .included (i.e. + // excluded has higher priority than included). + // Values are evaluated to match as follows: + // * If the value exactly matches. + // * If the value matches the wildcard pattern, where '?' matches any single + // character and '*' matches any number of characters including none. + // If omitted, .included attributes are included. 
+ // + Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` + + // Configure list of value patterns to include. + // Values are evaluated to match as follows: + // * If the value exactly matches. + // * If the value matches the wildcard pattern, where '?' matches any single + // character and '*' matches any number of characters including none. + // If omitted, all values are included. + // + Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` +} + +type InstrumentType string + +const InstrumentTypeCounter InstrumentType = "counter" +const InstrumentTypeGauge InstrumentType = "gauge" +const InstrumentTypeHistogram InstrumentType = "histogram" +const InstrumentTypeObservableCounter InstrumentType = "observable_counter" +const InstrumentTypeObservableGauge InstrumentType = "observable_gauge" +const InstrumentTypeObservableUpDownCounter InstrumentType = "observable_up_down_counter" +const InstrumentTypeUpDownCounter InstrumentType = "up_down_counter" + +type JaegerPropagator map[string]interface{} + +type LastValueAggregation map[string]interface{} + +type LogRecordExporter struct { + // Configure exporter to be console. + // If omitted, ignore. + // + Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // Configure exporter to be OTLP with file transport. + // If omitted, ignore. + // + OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"` + + // Configure exporter to be OTLP with gRPC transport. + // If omitted, ignore. + // + OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"` + + // Configure exporter to be OTLP with HTTP transport. + // If omitted, ignore. 
+ // + OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type LogRecordLimits struct { + // Configure max attribute count. Overrides + // .attribute_limits.attribute_count_limit. + // Value must be non-negative. + // If omitted or null, 128 is used. + // + AttributeCountLimit LogRecordLimitsAttributeCountLimit `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` + + // Configure max attribute value size. Overrides + // .attribute_limits.attribute_value_length_limit. + // Value must be non-negative. + // If omitted or null, there is no limit. + // + AttributeValueLengthLimit LogRecordLimitsAttributeValueLengthLimit `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` +} + +// Configure max attribute count. Overrides +// .attribute_limits.attribute_count_limit. +// Value must be non-negative. +// If omitted or null, 128 is used. +type LogRecordLimitsAttributeCountLimit *int + +// Configure max attribute value size. Overrides +// .attribute_limits.attribute_value_length_limit. +// Value must be non-negative. +// If omitted or null, there is no limit. +type LogRecordLimitsAttributeValueLengthLimit *int + +type LogRecordProcessor struct { + // Configure a batch log record processor. + // If omitted, ignore. + // + Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` + + // Configure a simple log record processor. + // If omitted, ignore. + // + Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type LoggerProvider struct { + // Configure log record limits. See also attribute_limits. 
+ // If omitted, default values as described in LogRecordLimits are used. + // + Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Configure loggers. + // If omitted, all loggers use default values as described in + // ExperimentalLoggerConfig. + // + LoggerConfiguratorDevelopment *ExperimentalLoggerConfigurator `json:"logger_configurator/development,omitempty" yaml:"logger_configurator/development,omitempty" mapstructure:"logger_configurator/development,omitempty"` + + // Configure log record processors. + // Property is required and must be non-null. + // + Processors []LogRecordProcessor `json:"processors" yaml:"processors" mapstructure:"processors"` +} + +type MeterProvider struct { + // Configure the exemplar filter. + // Values include: + // * always_off: ExemplarFilter which makes no measurements eligible for being an + // Exemplar. + // * always_on: ExemplarFilter which makes all measurements eligible for being an + // Exemplar. + // * trace_based: ExemplarFilter which makes measurements recorded in the context + // of a sampled parent span eligible for being an Exemplar. + // If omitted, trace_based is used. + // + ExemplarFilter *ExemplarFilter `json:"exemplar_filter,omitempty" yaml:"exemplar_filter,omitempty" mapstructure:"exemplar_filter,omitempty"` + + // Configure meters. + // If omitted, all meters use default values as described in + // ExperimentalMeterConfig. + // + MeterConfiguratorDevelopment *ExperimentalMeterConfigurator `json:"meter_configurator/development,omitempty" yaml:"meter_configurator/development,omitempty" mapstructure:"meter_configurator/development,omitempty"` + + // Configure metric readers. + // Property is required and must be non-null. + // + Readers []MetricReader `json:"readers" yaml:"readers" mapstructure:"readers"` + + // Configure views. 
+ // Each view has a selector which determines the instrument(s) it applies to, and + // a configuration for the resulting stream(s). + // If omitted, no views are registered. + // + Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` +} + +type MetricProducer struct { + // Configure metric producer to be opencensus. + // If omitted, ignore. + // + Opencensus OpenCensusMetricProducer `json:"opencensus,omitempty" yaml:"opencensus,omitempty" mapstructure:"opencensus,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +type MetricReader struct { + // Configure a periodic metric reader. + // If omitted, ignore. + // + Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` + + // Configure a pull based metric reader. + // If omitted, ignore. + // + Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` +} + +type NameStringValuePair struct { + // The name of the pair. + // Property is required and must be non-null. + // + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the pair. + // Property must be present, but if null the behavior is dependent on usage + // context. + // + Value NameStringValuePairValue `json:"value" yaml:"value" mapstructure:"value"` +} + +// The value of the pair. +// Property must be present, but if null the behavior is dependent on usage +// context. +type NameStringValuePairValue *string + +type OTLPGrpcExporter struct { + // Configure compression. + // Known values include: gzip, none. Implementations may support other compression + // algorithms. + // If omitted or null, none is used. + // + Compression OTLPGrpcExporterCompression `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Configure endpoint. + // If omitted or null, http://localhost:4317 is used. 
+ // + Endpoint OTLPGrpcExporterEndpoint `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Configure headers. Entries have higher priority than entries from + // .headers_list. + // If an entry's .value is null, the entry is ignored. + // If omitted, no headers are added. + // + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Configure headers. Entries have lower priority than entries from .headers. + // The value is a list of comma separated key-value pairs matching the format of + // OTEL_EXPORTER_OTLP_HEADERS. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options + // for details. + // If omitted or null, no headers are added. + // + HeadersList OTLPGrpcExporterHeadersList `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Configure max time (in milliseconds) to wait for each export. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 10000 is used. + // + Timeout OTLPGrpcExporterTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` + + // Configure TLS settings for the exporter. + // If omitted, system default TLS settings are used. + // + Tls *GrpcTls `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +} + +// Configure compression. +// Known values include: gzip, none. Implementations may support other compression +// algorithms. +// If omitted or null, none is used. +type OTLPGrpcExporterCompression *string + +// Configure endpoint. +// If omitted or null, http://localhost:4317 is used. +type OTLPGrpcExporterEndpoint *string + +// Configure headers. Entries have lower priority than entries from .headers. 
+// The value is a list of comma separated key-value pairs matching the format of +// OTEL_EXPORTER_OTLP_HEADERS. See +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options +// for details. +// If omitted or null, no headers are added. +type OTLPGrpcExporterHeadersList *string + +// Configure max time (in milliseconds) to wait for each export. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 10000 is used. +type OTLPGrpcExporterTimeout *int + +type OTLPGrpcMetricExporter struct { + // Configure compression. + // Known values include: gzip, none. Implementations may support other compression + // algorithms. + // If omitted or null, none is used. + // + Compression OTLPGrpcMetricExporterCompression `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Configure default histogram aggregation. + // Values include: + // * base2_exponential_bucket_histogram: Use base2 exponential histogram as the + // default aggregation for histogram instruments. + // * explicit_bucket_histogram: Use explicit bucket histogram as the default + // aggregation for histogram instruments. + // If omitted, explicit_bucket_histogram is used. + // + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Configure endpoint. + // If omitted or null, http://localhost:4317 is used. + // + Endpoint OTLPGrpcMetricExporterEndpoint `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Configure headers. Entries have higher priority than entries from + // .headers_list. + // If an entry's .value is null, the entry is ignored. + // If omitted, no headers are added. 
+ // + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Configure headers. Entries have lower priority than entries from .headers. + // The value is a list of comma separated key-value pairs matching the format of + // OTEL_EXPORTER_OTLP_HEADERS. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options + // for details. + // If omitted or null, no headers are added. + // + HeadersList OTLPGrpcMetricExporterHeadersList `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Configure temporality preference. + // Values include: + // * cumulative: Use cumulative aggregation temporality for all instrument types. + // * delta: Use delta aggregation for all instrument types except up down counter + // and asynchronous up down counter. + // * low_memory: Use delta aggregation temporality for counter and histogram + // instrument types. Use cumulative aggregation temporality for all other + // instrument types. + // If omitted, cumulative is used. + // + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` + + // Configure max time (in milliseconds) to wait for each export. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 10000 is used. + // + Timeout OTLPGrpcMetricExporterTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` + + // Configure TLS settings for the exporter. + // If omitted, system default TLS settings are used. + // + Tls *GrpcTls `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +} + +// Configure compression. +// Known values include: gzip, none. Implementations may support other compression +// algorithms. 
+// If omitted or null, none is used. +type OTLPGrpcMetricExporterCompression *string + +// Configure endpoint. +// If omitted or null, http://localhost:4317 is used. +type OTLPGrpcMetricExporterEndpoint *string + +// Configure headers. Entries have lower priority than entries from .headers. +// The value is a list of comma separated key-value pairs matching the format of +// OTEL_EXPORTER_OTLP_HEADERS. See +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options +// for details. +// If omitted or null, no headers are added. +type OTLPGrpcMetricExporterHeadersList *string + +// Configure max time (in milliseconds) to wait for each export. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 10000 is used. +type OTLPGrpcMetricExporterTimeout *int + +type OTLPHttpEncoding string + +const OTLPHttpEncodingJson OTLPHttpEncoding = "json" +const OTLPHttpEncodingProtobuf OTLPHttpEncoding = "protobuf" + +type OTLPHttpExporter struct { + // Configure compression. + // Known values include: gzip, none. Implementations may support other compression + // algorithms. + // If omitted or null, none is used. + // + Compression OTLPHttpExporterCompression `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Configure the encoding used for messages. + // Implementations may not support json. + // Values include: + // * json: Protobuf JSON encoding. + // * protobuf: Protobuf binary encoding. + // If omitted, protobuf is used. + // + Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"` + + // Configure endpoint, including the signal specific path. + // If omitted or null, the http://localhost:4318/v1/{signal} (where signal is + // 'traces', 'logs', or 'metrics') is used. 
+ // + Endpoint OTLPHttpExporterEndpoint `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Configure headers. Entries have higher priority than entries from + // .headers_list. + // If an entry's .value is null, the entry is ignored. + // If omitted, no headers are added. + // + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Configure headers. Entries have lower priority than entries from .headers. + // The value is a list of comma separated key-value pairs matching the format of + // OTEL_EXPORTER_OTLP_HEADERS. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options + // for details. + // If omitted or null, no headers are added. + // + HeadersList OTLPHttpExporterHeadersList `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Configure max time (in milliseconds) to wait for each export. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 10000 is used. + // + Timeout OTLPHttpExporterTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` + + // Configure TLS settings for the exporter. + // If omitted, system default TLS settings are used. + // + Tls *HttpTls `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +} + +// Configure compression. +// Known values include: gzip, none. Implementations may support other compression +// algorithms. +// If omitted or null, none is used. +type OTLPHttpExporterCompression *string + +// Configure endpoint, including the signal specific path. +// If omitted or null, the http://localhost:4318/v1/{signal} (where signal is +// 'traces', 'logs', or 'metrics') is used. +type OTLPHttpExporterEndpoint *string + +// Configure headers. 
Entries have lower priority than entries from .headers. +// The value is a list of comma separated key-value pairs matching the format of +// OTEL_EXPORTER_OTLP_HEADERS. See +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options +// for details. +// If omitted or null, no headers are added. +type OTLPHttpExporterHeadersList *string + +// Configure max time (in milliseconds) to wait for each export. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 10000 is used. +type OTLPHttpExporterTimeout *int + +type OTLPHttpMetricExporter struct { + // Configure compression. + // Known values include: gzip, none. Implementations may support other compression + // algorithms. + // If omitted or null, none is used. + // + Compression OTLPHttpMetricExporterCompression `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` + + // Configure default histogram aggregation. + // Values include: + // * base2_exponential_bucket_histogram: Use base2 exponential histogram as the + // default aggregation for histogram instruments. + // * explicit_bucket_histogram: Use explicit bucket histogram as the default + // aggregation for histogram instruments. + // If omitted, explicit_bucket_histogram is used. + // + DefaultHistogramAggregation *ExporterDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` + + // Configure the encoding used for messages. + // Implementations may not support json. + // Values include: + // * json: Protobuf JSON encoding. + // * protobuf: Protobuf binary encoding. + // If omitted, protobuf is used. + // + Encoding *OTLPHttpEncoding `json:"encoding,omitempty" yaml:"encoding,omitempty" mapstructure:"encoding,omitempty"` + + // Configure endpoint. 
+ // If omitted or null, http://localhost:4318/v1/metrics is used. + // + Endpoint OTLPHttpMetricExporterEndpoint `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // Configure headers. Entries have higher priority than entries from + // .headers_list. + // If an entry's .value is null, the entry is ignored. + // If omitted, no headers are added. + // + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // Configure headers. Entries have lower priority than entries from .headers. + // The value is a list of comma separated key-value pairs matching the format of + // OTEL_EXPORTER_OTLP_HEADERS. See + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options + // for details. + // If omitted or null, no headers are added. + // + HeadersList OTLPHttpMetricExporterHeadersList `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` + + // Configure temporality preference. + // Values include: + // * cumulative: Use cumulative aggregation temporality for all instrument types. + // * delta: Use delta aggregation for all instrument types except up down counter + // and asynchronous up down counter. + // * low_memory: Use delta aggregation temporality for counter and histogram + // instrument types. Use cumulative aggregation temporality for all other + // instrument types. + // If omitted, cumulative is used. + // + TemporalityPreference *ExporterTemporalityPreference `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` + + // Configure max time (in milliseconds) to wait for each export. + // Value must be non-negative. A value of 0 indicates no limit (infinity). + // If omitted or null, 10000 is used. 
+ // + Timeout OTLPHttpMetricExporterTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` + + // Configure TLS settings for the exporter. + // If omitted, system default TLS settings are used. + // + Tls *HttpTls `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +} + +// Configure compression. +// Known values include: gzip, none. Implementations may support other compression +// algorithms. +// If omitted or null, none is used. +type OTLPHttpMetricExporterCompression *string + +// Configure endpoint. +// If omitted or null, http://localhost:4318/v1/metrics is used. +type OTLPHttpMetricExporterEndpoint *string + +// Configure headers. Entries have lower priority than entries from .headers. +// The value is a list of comma separated key-value pairs matching the format of +// OTEL_EXPORTER_OTLP_HEADERS. See +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options +// for details. +// If omitted or null, no headers are added. +type OTLPHttpMetricExporterHeadersList *string + +// Configure max time (in milliseconds) to wait for each export. +// Value must be non-negative. A value of 0 indicates no limit (infinity). +// If omitted or null, 10000 is used. +type OTLPHttpMetricExporterTimeout *int + +type OpenCensusMetricProducer map[string]interface{} + +type OpenTelemetryConfiguration struct { + // Configure general attribute limits. See also tracer_provider.limits, + // logger_provider.limits. + // If omitted, default values as described in AttributeLimits are used. + // + AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` + + // Configure if the SDK is disabled or not. + // If omitted or null, false is used. 
	// NOTE(review): the fields below continue the OpenTelemetryConfiguration
	// struct, whose opening brace and earlier fields precede this section.
	// This entire file is machine-generated from the opentelemetry-configuration
	// JSON schema (see the `genjsonschema` Makefile target); do not edit by
	// hand — regenerate instead.
	Disabled OpenTelemetryConfigurationDisabled `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"`

	// Defines configuration parameters specific to a particular OpenTelemetry
	// distribution or vendor.
	// This section provides a standardized location for distribution-specific
	// settings that are not part of the OpenTelemetry configuration model.
	// It allows vendors to expose their own extensions and general
	// configuration options.
	// If omitted, distribution defaults are used.
	Distribution Distribution `json:"distribution,omitempty" yaml:"distribution,omitempty" mapstructure:"distribution,omitempty"`

	// The file format version.
	// Represented as a string including the semver major, minor version numbers
	// (and optionally the meta tag). For example: "0.4", "1.0-rc.2", "1.0"
	// (after stable release).
	// See
	// https://github.com/open-telemetry/opentelemetry-configuration/blob/main/VERSIONING.md
	// for more details.
	// The yaml format is documented at
	// https://github.com/open-telemetry/opentelemetry-configuration/tree/main/schema
	// Property is required and must be non-null.
	FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"`

	// Configure instrumentation.
	// If omitted, instrumentation defaults are used.
	InstrumentationDevelopment *ExperimentalInstrumentation `json:"instrumentation/development,omitempty" yaml:"instrumentation/development,omitempty" mapstructure:"instrumentation/development,omitempty"`

	// Configure the log level of the internal logger used by the SDK.
	// Values include:
	// * debug: debug, severity number 5.
	// * debug2: debug2, severity number 6.
	// * debug3: debug3, severity number 7.
	// * debug4: debug4, severity number 8.
	// * error: error, severity number 17.
	// * error2: error2, severity number 18.
	// * error3: error3, severity number 19.
	// * error4: error4, severity number 20.
	// * fatal: fatal, severity number 21.
	// * fatal2: fatal2, severity number 22.
	// * fatal3: fatal3, severity number 23.
	// * fatal4: fatal4, severity number 24.
	// * info: info, severity number 9.
	// * info2: info2, severity number 10.
	// * info3: info3, severity number 11.
	// * info4: info4, severity number 12.
	// * trace: trace, severity number 1.
	// * trace2: trace2, severity number 2.
	// * trace3: trace3, severity number 3.
	// * trace4: trace4, severity number 4.
	// * warn: warn, severity number 13.
	// * warn2: warn2, severity number 14.
	// * warn3: warn3, severity number 15.
	// * warn4: warn4, severity number 16.
	// If omitted, INFO is used.
	LogLevel *SeverityNumber `json:"log_level,omitempty" yaml:"log_level,omitempty" mapstructure:"log_level,omitempty"`

	// Configure logger provider.
	// If omitted, a noop logger provider is used.
	LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"`

	// Configure meter provider.
	// If omitted, a noop meter provider is used.
	MeterProvider *MeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"`

	// Configure text map context propagators.
	// If omitted, a noop propagator is used.
	Propagator *Propagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"`

	// Configure resource for all signals.
	// If omitted, the default resource is used.
	Resource *Resource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"`

	// Configure tracer provider.
	// If omitted, a noop tracer provider is used.
	TracerProvider *TracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

// Configure if the SDK is disabled or not.
// If omitted or null, false is used.
type OpenTelemetryConfigurationDisabled *bool

// OpenTracingPropagator holds the (schema-free) configuration for the
// opentracing propagator.
type OpenTracingPropagator map[string]interface{}

type ParentBasedSampler struct {
	// Configure local_parent_not_sampled sampler.
	// If omitted, always_off is used.
	LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"`

	// Configure local_parent_sampled sampler.
	// If omitted, always_on is used.
	LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"`

	// Configure remote_parent_not_sampled sampler.
	// If omitted, always_off is used.
	RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"`

	// Configure remote_parent_sampled sampler.
	// If omitted, always_on is used.
	RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"`

	// Configure root sampler.
	// If omitted, always_on is used.
	Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"`
}

type PeriodicMetricReader struct {
	// Configure cardinality limits.
	// If omitted, default values as described in CardinalityLimits are used.
	CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"`

	// Configure exporter.
	// Property is required and must be non-null.
	Exporter PushMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`

	// Configure delay interval (in milliseconds) between start of two
	// consecutive exports.
	// Value must be non-negative.
	// If omitted or null, 60000 is used.
	Interval PeriodicMetricReaderInterval `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"`

	// Configure metric producers.
	// If omitted, no metric producers are added.
	Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"`

	// Configure maximum allowed time (in milliseconds) to export data.
	// Value must be non-negative. A value of 0 indicates no limit (infinity).
	// If omitted or null, 30000 is used.
	Timeout PeriodicMetricReaderTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"`
}

// Configure delay interval (in milliseconds) between start of two consecutive
// exports.
// Value must be non-negative.
// If omitted or null, 60000 is used.
type PeriodicMetricReaderInterval *int

// Configure maximum allowed time (in milliseconds) to export data.
// Value must be non-negative. A value of 0 indicates no limit (infinity).
// If omitted or null, 30000 is used.
type PeriodicMetricReaderTimeout *int

type Propagator struct {
	// Configure the propagators in the composite text map propagator. Entries
	// from .composite_list are appended to the list here with duplicates
	// filtered out.
	// Built-in propagator keys include: tracecontext, baggage, b3, b3multi,
	// jaeger, ottrace. Known third party keys include: xray.
	// If omitted, and .composite_list is omitted or null, a noop propagator is
	// used.
	Composite []TextMapPropagator `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"`

	// Configure the propagators in the composite text map propagator. Entries
	// are appended to .composite with duplicates filtered out.
	// The value is a comma separated list of propagator identifiers matching
	// the format of OTEL_PROPAGATORS. See
	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration
	// for details.
	// Built-in propagator identifiers include: tracecontext, baggage, b3,
	// b3multi, jaeger, ottrace. Known third party identifiers include: xray.
	// If omitted or null, and .composite is omitted or null, a noop propagator
	// is used.
	CompositeList PropagatorCompositeList `json:"composite_list,omitempty" yaml:"composite_list,omitempty" mapstructure:"composite_list,omitempty"`
}

// Configure the propagators in the composite text map propagator. Entries are
// appended to .composite with duplicates filtered out.
// The value is a comma separated list of propagator identifiers matching the
// format of OTEL_PROPAGATORS. See
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration
// for details.
// Built-in propagator identifiers include: tracecontext, baggage, b3, b3multi,
// jaeger, ottrace. Known third party identifiers include: xray.
// If omitted or null, and .composite is omitted or null, a noop propagator is
// used.
type PropagatorCompositeList *string

type PullMetricExporter struct {
	// Configure exporter to be prometheus.
	// If omitted, ignore.
	PrometheusDevelopment *ExperimentalPrometheusMetricExporter `json:"prometheus/development,omitempty" yaml:"prometheus/development,omitempty" mapstructure:"prometheus/development,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

type PullMetricReader struct {
	// Configure cardinality limits.
	// If omitted, default values as described in CardinalityLimits are used.
	CardinalityLimits *CardinalityLimits `json:"cardinality_limits,omitempty" yaml:"cardinality_limits,omitempty" mapstructure:"cardinality_limits,omitempty"`

	// Configure exporter.
	// Property is required and must be non-null.
	Exporter PullMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`

	// Configure metric producers.
	// If omitted, no metric producers are added.
	Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"`
}

type PushMetricExporter struct {
	// Configure exporter to be console.
	// If omitted, ignore.
	Console *ConsoleMetricExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"`

	// Configure exporter to be OTLP with file transport.
	// If omitted, ignore.
	OTLPFileDevelopment *ExperimentalOTLPFileMetricExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"`

	// Configure exporter to be OTLP with gRPC transport.
	// If omitted, ignore.
	OTLPGrpc *OTLPGrpcMetricExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"`

	// Configure exporter to be OTLP with HTTP transport.
	// If omitted, ignore.
	OTLPHttp *OTLPHttpMetricExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

type Resource struct {
	// Configure resource attributes. Entries have higher priority than entries
	// from .resource.attributes_list.
	// If omitted, no resource attributes are added.
	Attributes []AttributeNameValue `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"`

	// Configure resource attributes. Entries have lower priority than entries
	// from .resource.attributes.
	// The value is a list of comma separated key-value pairs matching the
	// format of OTEL_RESOURCE_ATTRIBUTES. See
	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration
	// for details.
	// If omitted or null, no resource attributes are added.
	AttributesList ResourceAttributesList `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"`

	// Configure resource detection.
	// If omitted, resource detection is disabled.
	DetectionDevelopment *ExperimentalResourceDetection `json:"detection/development,omitempty" yaml:"detection/development,omitempty" mapstructure:"detection/development,omitempty"`

	// Configure resource schema URL.
	// If omitted or null, no schema URL is used.
	SchemaUrl ResourceSchemaUrl `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"`
}

// Configure resource attributes. Entries have lower priority than entries from
// .resource.attributes.
// The value is a list of comma separated key-value pairs matching the format of
// OTEL_RESOURCE_ATTRIBUTES. See
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration
// for details.
// If omitted or null, no resource attributes are added.
type ResourceAttributesList *string

// Configure resource schema URL.
// If omitted or null, no schema URL is used.
type ResourceSchemaUrl *string

type Sampler struct {
	// Configure sampler to be always_off.
	// If omitted, ignore.
	AlwaysOff AlwaysOffSampler `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"`

	// Configure sampler to be always_on.
	// If omitted, ignore.
	AlwaysOn AlwaysOnSampler `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"`

	// Configure sampler to be composite.
	// If omitted, ignore.
	CompositeDevelopment *ExperimentalComposableSampler `json:"composite/development,omitempty" yaml:"composite/development,omitempty" mapstructure:"composite/development,omitempty"`

	// Configure sampler to be jaeger_remote.
	// If omitted, ignore.
	JaegerRemoteDevelopment *ExperimentalJaegerRemoteSampler `json:"jaeger_remote/development,omitempty" yaml:"jaeger_remote/development,omitempty" mapstructure:"jaeger_remote/development,omitempty"`

	// Configure sampler to be parent_based.
	// If omitted, ignore.
	ParentBased *ParentBasedSampler `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"`

	// Configure sampler to be probability.
	// If omitted, ignore.
	ProbabilityDevelopment *ExperimentalProbabilitySampler `json:"probability/development,omitempty" yaml:"probability/development,omitempty" mapstructure:"probability/development,omitempty"`

	// Configure sampler to be trace_id_ratio_based.
	// If omitted, ignore.
	TraceIDRatioBased *TraceIDRatioBasedSampler `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

// SeverityNumber is the string form of an SDK internal-logger level; the
// numeric severities are documented on OpenTelemetryConfiguration.LogLevel.
type SeverityNumber string

const SeverityNumberDebug SeverityNumber = "debug"
const SeverityNumberDebug2 SeverityNumber = "debug2"
const SeverityNumberDebug3 SeverityNumber = "debug3"
const SeverityNumberDebug4 SeverityNumber = "debug4"
const SeverityNumberError SeverityNumber = "error"
const SeverityNumberError2 SeverityNumber = "error2"
const SeverityNumberError3 SeverityNumber = "error3"
const SeverityNumberError4 SeverityNumber = "error4"
const SeverityNumberFatal SeverityNumber = "fatal"
const SeverityNumberFatal2 SeverityNumber = "fatal2"
const SeverityNumberFatal3 SeverityNumber = "fatal3"
const SeverityNumberFatal4 SeverityNumber = "fatal4"
const SeverityNumberInfo SeverityNumber = "info"
const SeverityNumberInfo2 SeverityNumber = "info2"
const SeverityNumberInfo3 SeverityNumber = "info3"
const SeverityNumberInfo4 SeverityNumber = "info4"
const SeverityNumberTrace SeverityNumber = "trace"
const SeverityNumberTrace2 SeverityNumber = "trace2"
const SeverityNumberTrace3 SeverityNumber = "trace3"
const SeverityNumberTrace4 SeverityNumber = "trace4"
const SeverityNumberWarn SeverityNumber = "warn"
const SeverityNumberWarn2 SeverityNumber = "warn2"
const SeverityNumberWarn3 SeverityNumber = "warn3"
const SeverityNumberWarn4 SeverityNumber = "warn4"

type SimpleLogRecordProcessor struct {
	// Configure exporter.
	// Property is required and must be non-null.
	Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
}

type SimpleSpanProcessor struct {
	// Configure exporter.
	// Property is required and must be non-null.
	Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"`
}

type SpanExporter struct {
	// Configure exporter to be console.
	// If omitted, ignore.
	Console ConsoleExporter `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"`

	// Configure exporter to be OTLP with file transport.
	// If omitted, ignore.
	OTLPFileDevelopment *ExperimentalOTLPFileExporter `json:"otlp_file/development,omitempty" yaml:"otlp_file/development,omitempty" mapstructure:"otlp_file/development,omitempty"`

	// Configure exporter to be OTLP with gRPC transport.
	// If omitted, ignore.
	OTLPGrpc *OTLPGrpcExporter `json:"otlp_grpc,omitempty" yaml:"otlp_grpc,omitempty" mapstructure:"otlp_grpc,omitempty"`

	// Configure exporter to be OTLP with HTTP transport.
	// If omitted, ignore.
	OTLPHttp *OTLPHttpExporter `json:"otlp_http,omitempty" yaml:"otlp_http,omitempty" mapstructure:"otlp_http,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

// SpanKind names the kind of span a view or config rule applies to.
type SpanKind string

const SpanKindClient SpanKind = "client"
const SpanKindConsumer SpanKind = "consumer"
const SpanKindInternal SpanKind = "internal"
const SpanKindProducer SpanKind = "producer"
const SpanKindServer SpanKind = "server"

type SpanLimits struct {
	// Configure max attribute count. Overrides
	// .attribute_limits.attribute_count_limit.
	// Value must be non-negative.
	// If omitted or null, 128 is used.
	AttributeCountLimit SpanLimitsAttributeCountLimit `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"`

	// Configure max attribute value size. Overrides
	// .attribute_limits.attribute_value_length_limit.
	// Value must be non-negative.
	// If omitted or null, there is no limit.
	AttributeValueLengthLimit SpanLimitsAttributeValueLengthLimit `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"`

	// Configure max attributes per span event.
	// Value must be non-negative.
	// If omitted or null, 128 is used.
	EventAttributeCountLimit SpanLimitsEventAttributeCountLimit `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"`

	// Configure max span event count.
	// Value must be non-negative.
	// If omitted or null, 128 is used.
	EventCountLimit SpanLimitsEventCountLimit `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"`

	// Configure max attributes per span link.
	// Value must be non-negative.
	// If omitted or null, 128 is used.
	LinkAttributeCountLimit SpanLimitsLinkAttributeCountLimit `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"`

	// Configure max span link count.
	// Value must be non-negative.
	// If omitted or null, 128 is used.
	LinkCountLimit SpanLimitsLinkCountLimit `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"`
}

// Configure max attribute count. Overrides
// .attribute_limits.attribute_count_limit.
// Value must be non-negative.
// If omitted or null, 128 is used.
type SpanLimitsAttributeCountLimit *int

// Configure max attribute value size. Overrides
// .attribute_limits.attribute_value_length_limit.
// Value must be non-negative.
// If omitted or null, there is no limit.
type SpanLimitsAttributeValueLengthLimit *int

// Configure max attributes per span event.
// Value must be non-negative.
// If omitted or null, 128 is used.
type SpanLimitsEventAttributeCountLimit *int

// Configure max span event count.
// Value must be non-negative.
// If omitted or null, 128 is used.
type SpanLimitsEventCountLimit *int

// Configure max attributes per span link.
// Value must be non-negative.
// If omitted or null, 128 is used.
type SpanLimitsLinkAttributeCountLimit *int

// Configure max span link count.
// Value must be non-negative.
// If omitted or null, 128 is used.
type SpanLimitsLinkCountLimit *int

type SpanProcessor struct {
	// Configure a batch span processor.
	// If omitted, ignore.
	Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"`

	// Configure a simple span processor.
	// If omitted, ignore.
	Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

// SumAggregation holds the (schema-free) configuration for the sum
// aggregation.
type SumAggregation map[string]interface{}

type TextMapPropagator struct {
	// Include the zipkin b3 propagator.
	// If omitted, ignore.
	B3 B3Propagator `json:"b3,omitempty" yaml:"b3,omitempty" mapstructure:"b3,omitempty"`

	// Include the zipkin b3 multi propagator.
	// If omitted, ignore.
	B3Multi B3MultiPropagator `json:"b3multi,omitempty" yaml:"b3multi,omitempty" mapstructure:"b3multi,omitempty"`

	// Include the w3c baggage propagator.
	// If omitted, ignore.
	Baggage BaggagePropagator `json:"baggage,omitempty" yaml:"baggage,omitempty" mapstructure:"baggage,omitempty"`

	// Include the jaeger propagator.
	// If omitted, ignore.
	Jaeger JaegerPropagator `json:"jaeger,omitempty" yaml:"jaeger,omitempty" mapstructure:"jaeger,omitempty"`

	// Include the opentracing propagator.
	// If omitted, ignore.
	Ottrace OpenTracingPropagator `json:"ottrace,omitempty" yaml:"ottrace,omitempty" mapstructure:"ottrace,omitempty"`

	// Include the w3c trace context propagator.
	// If omitted, ignore.
	Tracecontext TraceContextPropagator `json:"tracecontext,omitempty" yaml:"tracecontext,omitempty" mapstructure:"tracecontext,omitempty"`

	// Properties outside the schema are collected here by mapstructure.
	AdditionalProperties interface{} `mapstructure:",remain"`
}

// TraceContextPropagator holds the (schema-free) configuration for the w3c
// trace context propagator.
type TraceContextPropagator map[string]interface{}

type TraceIDRatioBasedSampler struct {
	// Configure trace_id_ratio.
	// If omitted or null, 1.0 is used.
	Ratio TraceIDRatioBasedSamplerRatio `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"`
}

// Configure trace_id_ratio.
// If omitted or null, 1.0 is used.
type TraceIDRatioBasedSamplerRatio *float64

type TracerProvider struct {
	// Configure span limits. See also attribute_limits.
	// If omitted, default values as described in SpanLimits are used.
	Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`

	// Configure span processors.
	// Property is required and must be non-null.
	Processors []SpanProcessor `json:"processors" yaml:"processors" mapstructure:"processors"`

	// Configure the sampler.
	// If omitted, parent based sampler with a root of always_on is used.
	Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"`

	// Configure tracers.
	// If omitted, all tracers use default values as described in
	// ExperimentalTracerConfig.
	TracerConfiguratorDevelopment *ExperimentalTracerConfigurator `json:"tracer_configurator/development,omitempty" yaml:"tracer_configurator/development,omitempty" mapstructure:"tracer_configurator/development,omitempty"`
}

type View struct {
	// Configure view selector.
	// Selection criteria is additive as described in
	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#instrument-selection-criteria.
	// Property is required and must be non-null.
	Selector ViewSelector `json:"selector" yaml:"selector" mapstructure:"selector"`

	// Configure view stream.
	// Property is required and must be non-null.
	Stream ViewStream `json:"stream" yaml:"stream" mapstructure:"stream"`
}

type ViewSelector struct {
	// Configure instrument name selection criteria.
	// If omitted or null, all instrument names match.
	InstrumentName ViewSelectorInstrumentName `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"`

	// Configure instrument type selection criteria.
	// Values include:
	// * counter: Synchronous counter instruments.
	// * gauge: Synchronous gauge instruments.
	// * histogram: Synchronous histogram instruments.
	// * observable_counter: Asynchronous counter instruments.
	// * observable_gauge: Asynchronous gauge instruments.
	// * observable_up_down_counter: Asynchronous up down counter instruments.
	// * up_down_counter: Synchronous up down counter instruments.
	// If omitted, all instrument types match.
	InstrumentType *InstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"`

	// Configure meter name selection criteria.
	// If omitted or null, all meter names match.
	MeterName ViewSelectorMeterName `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"`

	// Configure meter schema url selection criteria.
	// If omitted or null, all meter schema URLs match.
	MeterSchemaUrl ViewSelectorMeterSchemaUrl `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"`

	// Configure meter version selection criteria.
	// If omitted or null, all meter versions match.
	MeterVersion ViewSelectorMeterVersion `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"`

	// Configure the instrument unit selection criteria.
	// If omitted or null, all instrument units match.
	Unit ViewSelectorUnit `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"`
}

// Configure instrument name selection criteria.
// If omitted or null, all instrument names match.
type ViewSelectorInstrumentName *string

// Configure meter name selection criteria.
// If omitted or null, all meter names match.
type ViewSelectorMeterName *string

// Configure meter schema url selection criteria.
// If omitted or null, all meter schema URLs match.
type ViewSelectorMeterSchemaUrl *string

// Configure meter version selection criteria.
// If omitted or null, all meter versions match.
type ViewSelectorMeterVersion *string

// Configure the instrument unit selection criteria.
// If omitted or null, all instrument units match.
type ViewSelectorUnit *string

type ViewStream struct {
	// Configure aggregation of the resulting stream(s).
	// If omitted, default is used.
	Aggregation *Aggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"`

	// Configure the aggregation cardinality limit.
	// If omitted or null, the metric reader's default cardinality limit is
	// used.
	AggregationCardinalityLimit ViewStreamAggregationCardinalityLimit `json:"aggregation_cardinality_limit,omitempty" yaml:"aggregation_cardinality_limit,omitempty" mapstructure:"aggregation_cardinality_limit,omitempty"`

	// Configure attribute keys retained in the resulting stream(s).
	// If omitted, all attribute keys are retained.
	AttributeKeys *IncludeExclude `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"`

	// Configure metric description of the resulting stream(s).
	// If omitted or null, the instrument's origin description is used.
	Description ViewStreamDescription `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"`

	// Configure metric name of the resulting stream(s).
	// If omitted or null, the instrument's original name is used.
	Name ViewStreamName `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"`
}

// Configure the aggregation cardinality limit.
// If omitted or null, the metric reader's default cardinality limit is used.
type ViewStreamAggregationCardinalityLimit *int

// Configure metric description of the resulting stream(s).
// If omitted or null, the instrument's origin description is used.
type ViewStreamDescription *string

// Configure metric name of the resulting stream(s).
// If omitted or null, the instrument's original name is used.
type ViewStreamName *string

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package x // import "go.opentelemetry.io/contrib/otelconf/x"

import (
	"context"
	"errors"
	"fmt"
	"net/url"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/noop"
	sdklog "go.opentelemetry.io/otel/sdk/log"
	"go.opentelemetry.io/otel/sdk/resource"
	"google.golang.org/grpc/credentials"

	"go.opentelemetry.io/contrib/otelconf/internal/tls"
)

// loggerProvider builds a log.LoggerProvider from the declarative
// configuration in cfg, attaching res as the SDK resource. When no
// logger_provider section is configured, a noop provider is returned.
// (Body continues below.)
func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) {
	if cfg.opentelemetryConfig.LoggerProvider == nil {
		return noop.NewLoggerProvider(), noopShutdown, nil
	}
	opts :=
		append(cfg.loggerProviderOptions, sdklog.WithResource(res))

	// Build a processor per configured entry; collect (rather than
	// short-circuit on) errors so every invalid entry is reported at once.
	var errs []error
	for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors {
		sp, err := logProcessor(cfg.ctx, processor)
		if err == nil {
			opts = append(opts, sdklog.WithProcessor(sp))
		} else {
			errs = append(errs, err)
		}
	}

	// Any processor error aborts construction: fall back to a noop provider.
	if len(errs) > 0 {
		return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...)
	}

	lp := sdklog.NewLoggerProvider(opts...)
	return lp, lp.Shutdown, nil
}

// logProcessor converts a single LogRecordProcessor config entry into an SDK
// processor. Exactly one of .batch or .simple must be set; setting both or
// neither is an error.
func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) {
	if processor.Batch != nil && processor.Simple != nil {
		return nil, newErrInvalid("must not specify multiple log processor type")
	}
	if processor.Batch != nil {
		exp, err := logExporter(ctx, processor.Batch.Exporter)
		if err != nil {
			return nil, err
		}
		return batchLogProcessor(processor.Batch, exp)
	}
	if processor.Simple != nil {
		exp, err := logExporter(ctx, processor.Simple.Exporter)
		if err != nil {
			return nil, err
		}
		return sdklog.NewSimpleProcessor(exp), nil
	}
	return nil, newErrInvalid("unsupported log processor type, must be one of simple or batch")
}

// logExporter instantiates the single log exporter configured in exporter
// (console, otlp_http, or otlp_grpc). Configuring more than one, or none, is
// an error. otlp_file/development is recognized but not yet implemented.
// NOTE(review): the otlp_file/development branch returns before the
// multiple-exporter check, so "console + otlp_file" reports the
// "otlp_file/development" error rather than "multiple exporters" — confirm
// this ordering is intentional.
func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) {
	exportersConfigured := 0
	var exportFunc func() (sdklog.Exporter, error)

	if exporter.Console != nil {
		exportersConfigured++
		exportFunc = func() (sdklog.Exporter, error) {
			return stdoutlog.New(
				stdoutlog.WithPrettyPrint(),
			)
		}
	}

	if exporter.OTLPHttp != nil {
		exportersConfigured++
		exportFunc = func() (sdklog.Exporter, error) {
			return otlpHTTPLogExporter(ctx, exporter.OTLPHttp)
		}
	}
	if exporter.OTLPGrpc != nil {
		exportersConfigured++
		exportFunc = func() (sdklog.Exporter, error) {
			return otlpGRPCLogExporter(ctx, exporter.OTLPGrpc)
		}
	}
	if exporter.OTLPFileDevelopment != nil {
		// TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408
		return nil, newErrInvalid("otlp_file/development")
	}

	if exportersConfigured > 1 {
		return nil, newErrInvalid("must not specify multiple exporters")
	}

	if exportFunc != nil {
		return exportFunc()
	}

	return nil, newErrInvalid("no valid log exporter")
}

// batchLogProcessor wraps exp in a batch processor, applying whichever
// optional batching knobs are set on blp. All durations in the config are
// milliseconds.
func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) {
	var opts []sdklog.BatchProcessorOption
	if err := validateBatchLogRecordProcessor(blp); err != nil {
		return nil, err
	}
	if blp.ExportTimeout != nil {
		opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout)))
	}
	if blp.MaxExportBatchSize != nil {
		opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize))
	}
	if blp.MaxQueueSize != nil {
		opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize))
	}

	if blp.ScheduleDelay != nil {
		opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay)))
	}

	return sdklog.NewBatchProcessor(exp, opts...), nil
}

// otlpHTTPLogExporter builds an OTLP/HTTP log exporter from otlpConfig:
// endpoint (host + optional URL path; an "http" scheme implies insecure),
// compression (gzip/none), timeout (milliseconds), headers, and optional TLS
// client configuration.
func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLPHttpExporter) (sdklog.Exporter, error) {
	var opts []otlploghttp.Option

	if otlpConfig.Endpoint != nil {
		u, err := url.ParseRequestURI(*otlpConfig.Endpoint)
		if err != nil {
			return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err)
		}
		// The exporter option takes host[:port] only; scheme and path are
		// translated into separate options below.
		opts = append(opts, otlploghttp.WithEndpoint(u.Host))

		if u.Scheme == "http" {
			opts = append(opts, otlploghttp.WithInsecure())
		}
		if u.Path != "" {
			opts = append(opts, otlploghttp.WithURLPath(u.Path))
		}
	}
	if otlpConfig.Compression != nil {
		switch *otlpConfig.Compression {
		case compressionGzip:
			opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression))
		case compressionNone:
			opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression))
		default:
			return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression))
		}
	}
	// Timeout of 0 means "no limit" in the schema, so only set when positive.
	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
		opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
	}
	headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList)
	if err != nil {
		return nil, err
	}
	if len(headersConfig) > 0 {
		opts = append(opts, otlploghttp.WithHeaders(headersConfig))
	}

	if otlpConfig.Tls != nil {
		tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, otlpConfig.Tls.KeyFile)
		if err != nil {
			return nil, errors.Join(newErrInvalid("tls configuration"), err)
		}
		opts = append(opts, otlploghttp.WithTLSClientConfig(tlsConfig))
	}

	return otlploghttp.New(ctx, opts...)
}

// otlpGRPCLogExporter builds an OTLP/gRPC log exporter from otlpConfig,
// mirroring otlpHTTPLogExporter but with gRPC transport options
// (WithCompressor, WithTLSCredentials). (Function closes past this chunk.)
func otlpGRPCLogExporter(ctx context.Context, otlpConfig *OTLPGrpcExporter) (sdklog.Exporter, error) {
	var opts []otlploggrpc.Option

	if otlpConfig.Endpoint != nil {
		u, err := url.ParseRequestURI(*otlpConfig.Endpoint)
		if err != nil {
			return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err)
		}
		// ParseRequestURI leaves the Host field empty when no
		// scheme is specified (i.e. localhost:4317). This check is
		// here to support the case where a user may not specify a
		// scheme. The code does its best effort here by using
		// otlpConfig.Endpoint as-is in that case
		if u.Host != "" {
			opts = append(opts, otlploggrpc.WithEndpoint(u.Host))
		} else {
			opts = append(opts, otlploggrpc.WithEndpoint(*otlpConfig.Endpoint))
		}
		// Insecure when the scheme is explicitly "http", or when the scheme is
		// not "https" and tls.insecure is set to true in the config.
		if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Tls != nil && otlpConfig.Tls.Insecure != nil && *otlpConfig.Tls.Insecure) {
			opts = append(opts, otlploggrpc.WithInsecure())
		}
	}
	if otlpConfig.Compression != nil {
		switch *otlpConfig.Compression {
		case compressionGzip:
			opts = append(opts, otlploggrpc.WithCompressor(*otlpConfig.Compression))
		case compressionNone:
			// none requires no options
		default:
			return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression))
		}
	}
	// Timeout of 0 means "no limit" in the schema, so only set when positive.
	if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 {
		opts = append(opts, otlploggrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout)))
	}
	headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList)
	if err != nil {
		return nil, err
	}
	if len(headersConfig) > 0 {
		opts = append(opts, otlploggrpc.WithHeaders(headersConfig))
	}

	// Only build TLS credentials when certificate material is configured;
	// tls.insecure alone is handled via WithInsecure above.
	if otlpConfig.Tls != nil && (otlpConfig.Tls.CaFile != nil || otlpConfig.Tls.CertFile != nil || otlpConfig.Tls.KeyFile != nil) {
		tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, otlpConfig.Tls.KeyFile)
		if err != nil {
			return nil, errors.Join(newErrInvalid("tls configuration"), err)
		}
		opts = append(opts, otlploggrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)))
	}

	return otlploggrpc.New(ctx, opts...)
+} diff --git a/otelconf/x/log_test.go b/otelconf/x/log_test.go new file mode 100644 index 00000000000..0a6a4f90155 --- /dev/null +++ b/otelconf/x/log_test.go @@ -0,0 +1,906 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdklogtest "go.opentelemetry.io/otel/sdk/log/logtest" + "go.opentelemetry.io/otel/sdk/resource" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func TestLoggerProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider log.LoggerProvider + wantErr error + }{ + { + name: "no-logger-provider-configured", + wantProvider: noop.NewLoggerProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + LoggerProvider: &LoggerProvider{ + Processors: []LogRecordProcessor{ + { + Simple: &SimpleLogRecordProcessor{}, + Batch: &BatchLogRecordProcessor{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewLoggerProvider(), + wantErr: newErrInvalid("must not specify multiple log processor type"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mp, shutdown, err := loggerProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, mp) + assert.ErrorIs(t, err, tt.wantErr) + require.NoError(t, 
shutdown(t.Context())) + }) + } +} + +func TestLogProcessor(t *testing.T) { + ctx := t.Context() + + otlpHTTPExporter, err := otlploghttp.New(ctx) + require.NoError(t, err) + + otlpGRPCExporter, err := otlploggrpc.New(ctx) + require.NoError(t, err) + + consoleExporter, err := stdoutlog.New( + stdoutlog.WithPrettyPrint(), + ) + require.NoError(t, err) + + testCases := []struct { + name string + processor LogRecordProcessor + args any + wantErrT error + wantProcessor sdklog.Processor + }{ + { + name: "no processor", + wantErrT: newErrInvalid("unsupported log processor type, must be one of simple or batch"), + }, + { + name: "multiple processor types", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + Simple: &SimpleLogRecordProcessor{}, + }, + wantErrT: newErrInvalid("must not specify multiple log processor type"), + }, + { + name: "batch processor invalid batch size otlphttp exporter", + + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { + name: "batch processor invalid export timeout otlphttp exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + ExportTimeout: ptr(-2), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + wantErrT: newErrGreaterOrEqualZero("export_timeout"), + }, + { + name: "batch processor invalid queue size otlphttp exporter", + + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxQueueSize: ptr(-3), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "batch processor invalid schedule delay console exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + ScheduleDelay: ptr(-4), + Exporter: 
LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + wantErrT: newErrGreaterOrEqualZero("schedule_delay"), + }, + { + name: "batch processor invalid exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + }, + wantErrT: newErrInvalid("no valid log exporter"), + }, + { + name: "batch/console", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(consoleExporter), + }, + { + name: "batch/otlp-grpc-exporter-no-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("http://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-exporter-socket-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: 
&OTLPGrpcExporter{ + Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-good-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-bad-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-grpc-bad-headerslist", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "batch/otlp-grpc-bad-client-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + 
name: "batch/otlp-grpc-exporter-no-scheme", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-invalid-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "batch/otlp-grpc-invalid-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "batch/otlp-http-exporter", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", 
Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-good-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-bad-ca-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-http-bad-client-certificate", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-http-bad-headerslist", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "batch/otlp-http-exporter-with-path", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: 
ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-scheme", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-endpoint", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "batch/otlp-http-none-compression", + processor: 
LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-compression", + processor: LogRecordProcessor{ + Batch: &BatchLogRecordProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "simple/no-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{}, + }, + }, + wantErrT: newErrInvalid("no valid log exporter"), + }, + { + name: "simple/console", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(consoleExporter), + }, + { + name: "simple/otlp_file", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("otlp_file/development"), + }, + { + name: "simple/multiple", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + Console: ConsoleExporter{}, + OTLPGrpc: &OTLPGrpcExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("must not specify multiple exporters"), + }, 
+ { + name: "simple/otlp-exporter", + processor: LogRecordProcessor{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdklog.NewSimpleProcessor(otlpHTTPExporter), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := logProcessor(t.Context(), tt.processor) + require.ErrorIs(t, err, tt.wantErrT) + if tt.wantProcessor == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName("exporter").Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName("exporter").Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + } + }) + } +} + +func TestLoggerProviderOptions(t *testing.T) { + var calls int + srv := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + calls++ + })) + defer srv.Close() + + cfg := OpenTelemetryConfiguration{ + LoggerProvider: &LoggerProvider{ + Processors: []LogRecordProcessor{{ + Simple: &SimpleLogRecordProcessor{ + Exporter: LogRecordExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(srv.URL), + }, + }, + }, + }}, + }, + } + + var buf bytes.Buffer + stdoutlogExporter, err := stdoutlog.New(stdoutlog.WithWriter(&buf)) + require.NoError(t, err) + + res := resource.NewSchemaless(attribute.String("foo", "bar")) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithLoggerProviderOptions(sdklog.WithProcessor(sdklog.NewSimpleProcessor(stdoutlogExporter))), + WithLoggerProviderOptions(sdklog.WithResource(res)), + ) + require.NoError(t, err) + defer func() { + assert.NoError(t, sdk.Shutdown(t.Context())) + }() + + // The exporter, 
which we passed in as an extra option to NewSDK, + // should be wired up to the provider in addition to the + // configuration-based OTLP exporter. + logger := sdk.LoggerProvider().Logger("test") + logger.Emit(t.Context(), log.Record{}) + assert.NotZero(t, buf) + assert.Equal(t, 1, calls) + // Options provided by WithMeterProviderOptions may be overridden + // by configuration, e.g. the resource is always defined via + // configuration. + assert.NotContains(t, buf.String(), "foo") +} + +func Test_otlpGRPCLogExporter(t *testing.T) { + if runtime.GOOS == "windows" { + // TODO (#7446): Fix the flakiness on Windows. + t.Skip("Test is flaky on Windows.") + } + type args struct { + ctx context.Context + otlpConfig *OTLPGrpcExporter + } + tests := []struct { + name string + args args + grpcServerOpts func() ([]grpc.ServerOption, error) + }{ + { + name: "no TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + Insecure: ptr(true), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + return []grpc.ServerOption{}, nil + }, + }, + { + name: "with TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + CaFile: ptr("../testdata/server-certs/server.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + tlsCreds, err := credentials.NewServerTLSFromFile("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + { + name: "with TLS config and client key", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: 
ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + CaFile: ptr("../testdata/server-certs/server.crt"), + KeyFile: ptr("../testdata/client-certs/client.key"), + CertFile: ptr("../testdata/client-certs/client.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + cert, err := tls.LoadX509KeyPair("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") + if err != nil { + return nil, err + } + caCert, err := os.ReadFile("../testdata/ca.crt") + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + tlsCreds := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n, err := net.Listen("tcp4", "localhost:0") + require.NoError(t, err) + + // We need to manually construct the endpoint using the port on which the server is listening. + // + // n.Addr() always returns 127.0.0.1 instead of localhost. + // But our certificate is created with CN as 'localhost', not '127.0.0.1'. + // So we have to manually form the endpoint as "localhost:". 
+ _, port, err := net.SplitHostPort(n.Addr().String()) + require.NoError(t, err) + tt.args.otlpConfig.Endpoint = ptr("localhost:" + port) + + serverOpts, err := tt.grpcServerOpts() + require.NoError(t, err) + + startGRPCLogsCollector(t, n, serverOpts) + + exporter, err := otlpGRPCLogExporter(tt.args.ctx, tt.args.otlpConfig) + require.NoError(t, err) + + logFactory := sdklogtest.RecordFactory{ + Body: log.StringValue("test"), + } + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.NoError(collect, exporter.Export(context.Background(), []sdklog.Record{ //nolint:usetesting // required to avoid getting a canceled context. + logFactory.NewRecord(), + })) + }, 10*time.Second, 1*time.Second) + }) + } +} + +// grpcLogsCollector is an OTLP gRPC server that collects all requests it receives. +type grpcLogsCollector struct { + collogpb.UnimplementedLogsServiceServer +} + +var _ collogpb.LogsServiceServer = (*grpcLogsCollector)(nil) + +// startGRPCLogsCollector returns a *grpcLogsCollector that is listening at the provided +// endpoint. +// +// If endpoint is an empty string, the returned collector will be listening on +// the localhost interface at an OS chosen port. +func startGRPCLogsCollector(t *testing.T, listener net.Listener, serverOptions []grpc.ServerOption) { + srv := grpc.NewServer(serverOptions...) + c := &grpcLogsCollector{} + + collogpb.RegisterLogsServiceServer(srv, c) + + errCh := make(chan error, 1) + go func() { errCh <- srv.Serve(listener) }() + + t.Cleanup(func() { + srv.GracefulStop() + if err := <-errCh; err != nil && !errors.Is(err, grpc.ErrServerStopped) { + assert.NoError(t, err) + } + }) +} + +// Export handles the export req. 
+func (*grpcLogsCollector) Export( + _ context.Context, + _ *collogpb.ExportLogsServiceRequest, +) (*collogpb.ExportLogsServiceResponse, error) { + return &collogpb.ExportLogsServiceResponse{}, nil +} diff --git a/otelconf/x/metric.go b/otelconf/x/metric.go new file mode 100644 index 00000000000..a303d8195a9 --- /dev/null +++ b/otelconf/x/metric.go @@ -0,0 +1,587 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "net" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/otlptranslator" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/contrib/otelconf/internal/tls" +) + +var zeroScope instrumentation.Scope + +const instrumentKindUndefined = sdkmetric.InstrumentKind(0) + +func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.MeterProvider == nil { + return noop.NewMeterProvider(), noopShutdown, nil + } + opts := append(cfg.meterProviderOptions, sdkmetric.WithResource(res)) + + var errs []error + for _, reader := range cfg.opentelemetryConfig.MeterProvider.Readers { + r, err := 
metricReader(cfg.ctx, reader) + if err == nil { + opts = append(opts, sdkmetric.WithReader(r)) + } else { + errs = append(errs, err) + } + } + for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { + v, err := view(vw) + if err == nil { + opts = append(opts, sdkmetric.WithView(v)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) + } + + mp := sdkmetric.NewMeterProvider(opts...) + return mp, mp.Shutdown, nil +} + +func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { + if r.Periodic != nil && r.Pull != nil { + return nil, newErrInvalid("must not specify multiple metric reader type") + } + + if r.Periodic != nil { + var opts []sdkmetric.PeriodicReaderOption + if r.Periodic.Interval != nil { + opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) + } + + if r.Periodic.Timeout != nil { + opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) + } + return periodicExporter(ctx, r.Periodic.Exporter, opts...) 
+ } + + if r.Pull != nil { + return pullReader(ctx, r.Pull.Exporter) + } + return nil, newErrInvalid("no valid metric reader") +} + +func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { + if exporter.PrometheusDevelopment != nil { + return prometheusReader(ctx, exporter.PrometheusDevelopment) + } + return nil, newErrInvalid("no valid metric exporter") +} + +func periodicExporter(ctx context.Context, exporter PushMetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { + exportersConfigured := 0 + var exportFunc func() (sdkmetric.Reader, error) + + if exporter.Console != nil { + exportersConfigured++ + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + + exp, err := stdoutmetric.New( + stdoutmetric.WithEncoder(enc), + ) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + if exporter.OTLPHttp != nil { + exportersConfigured++ + exp, err := otlpHTTPMetricExporter(ctx, exporter.OTLPHttp) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + if exporter.OTLPGrpc != nil { + exportersConfigured++ + exp, err := otlpGRPCMetricExporter(ctx, exporter.OTLPGrpc) + if err != nil { + return nil, err + } + exportFunc = func() (sdkmetric.Reader, error) { + return sdkmetric.NewPeriodicReader(exp, opts...), nil + } + } + if exporter.OTLPFileDevelopment != nil { + // TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408 + return nil, newErrInvalid("otlp_file/development") + } + + if exportersConfigured > 1 { + return nil, newErrInvalid("must not specify multiple exporters") + } + + if exportFunc != nil { + return exportFunc() + } + + return nil, newErrInvalid("no valid metric exporter") +} + +func otlpHTTPMetricExporter(ctx context.Context, otlpConfig 
*OTLPHttpMetricExporter) (sdkmetric.Exporter, error) { + opts := []otlpmetrichttp.Option{} + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err) + } + opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlpmetrichttp.WithInsecure()) + } + if u.Path != "" { + opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression)) + } + } + if otlpConfig.Timeout != nil { + opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlpmetrichttp.WithHeaders(headersConfig)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) + case "low_memory": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference)) + } + } + if otlpConfig.Tls != nil { + tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, otlpConfig.Tls.KeyFile) + if err != nil { + return nil, 
errors.Join(newErrInvalid("tls configuration"), err) + } + opts = append(opts, otlpmetrichttp.WithTLSClientConfig(tlsConfig)) + } + + return otlpmetrichttp.New(ctx, opts...) +} + +func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPGrpcMetricExporter) (sdkmetric.Exporter, error) { + var opts []otlpmetricgrpc.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err) + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case + if u.Host != "" { + opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) + } else { + opts = append(opts, otlpmetricgrpc.WithEndpoint(*otlpConfig.Endpoint)) + } + if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Tls != nil && otlpConfig.Tls.Insecure != nil && *otlpConfig.Tls.Insecure) { + opts = append(opts, otlpmetricgrpc.WithInsecure()) + } + } + + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) + case compressionNone: + // none requires no options + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression)) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlpmetricgrpc.WithHeaders(headersConfig)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case 
"delta": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) + case "cumulative": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) + case "low_memory": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference)) + } + } + + if otlpConfig.Tls != nil && (otlpConfig.Tls.CaFile != nil || otlpConfig.Tls.CertFile != nil || otlpConfig.Tls.KeyFile != nil) { + tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, otlpConfig.Tls.KeyFile) + if err != nil { + return nil, errors.Join(newErrInvalid("tls configuration"), err) + } + opts = append(opts, otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) + } + + return otlpmetricgrpc.New(ctx, opts...) +} + +func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +// newIncludeExcludeFilter returns a Filter that includes attributes +// in the include list and excludes attributes in the excludes list. +// It returns an error if an attribute is in both lists +// +// If IncludeExclude is empty an include-all filter is returned. 
+func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { + if lists == nil { + return func(attribute.KeyValue) bool { return true }, nil + } + + included := make(map[attribute.Key]struct{}) + for _, k := range lists.Included { + included[attribute.Key(k)] = struct{}{} + } + excluded := make(map[attribute.Key]struct{}) + for _, k := range lists.Excluded { + if _, ok := included[attribute.Key(k)]; ok { + return nil, fmt.Errorf("attribute cannot be in both include and exclude list: %s", k) + } + excluded[attribute.Key(k)] = struct{}{} + } + return func(kv attribute.KeyValue) bool { + // check if a value is excluded first + if _, ok := excluded[kv.Key]; ok { + return false + } + + if len(included) == 0 { + return true + } + + _, ok := included[kv.Key] + return ok + }, nil +} + +func prometheusReader(ctx context.Context, prometheusConfig *ExperimentalPrometheusMetricExporter) (sdkmetric.Reader, error) { + if prometheusConfig.Host == nil { + return nil, newErrInvalid("host must be specified") + } + if prometheusConfig.Port == nil { + return nil, newErrInvalid("port must be specified") + } + + opts, err := prometheusReaderOpts(prometheusConfig) + if err != nil { + return nil, err + } + + reg := prometheus.NewRegistry() + opts = append(opts, otelprom.WithRegisterer(reg)) + + reader, err := otelprom.New(opts...) + if err != nil { + return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) + } + + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) + server := http.Server{ + // Timeouts are necessary to make a server resilient to attacks. 
+ // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + IdleTimeout: 120 * time.Second, + Handler: mux, + } + + // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". + host := *prometheusConfig.Host + if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + + addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) + lis, err := net.Listen("tcp", addr) + if err != nil { + return nil, errors.Join( + fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), + reader.Shutdown(ctx), + ) + } + + // Only for testing reasons, add the address to the http Server, will not be used. + server.Addr = lis.Addr().String() + + go func() { + if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) { + otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) + } + }() + + return readerWithServer{reader, &server}, nil +} + +func validTranslationStrategy(strategy ExperimentalPrometheusTranslationStrategy) bool { + return strategy == ExperimentalPrometheusTranslationStrategyNoTranslation || + strategy == ExperimentalPrometheusTranslationStrategyNoUtf8EscapingWithSuffixes || + strategy == ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes || + strategy == ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes +} + +func prometheusReaderOpts(prometheusConfig *ExperimentalPrometheusMetricExporter) ([]otelprom.Option, error) { + var opts []otelprom.Option + if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { + opts = append(opts, otelprom.WithoutScopeInfo()) + } + if prometheusConfig.TranslationStrategy != nil { + if !validTranslationStrategy(*prometheusConfig.TranslationStrategy) { + return nil, 
newErrInvalid("translation strategy invalid") + } + opts = append(opts, otelprom.WithTranslationStrategy(otlptranslator.TranslationStrategyOption(*prometheusConfig.TranslationStrategy))) + } + if prometheusConfig.WithResourceConstantLabels != nil { + f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) + if err != nil { + return nil, err + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(f)) + } + + return opts, nil +} + +type readerWithServer struct { + sdkmetric.Reader + server *http.Server +} + +func (rws readerWithServer) Shutdown(ctx context.Context) error { + return errors.Join( + rws.Reader.Shutdown(ctx), + rws.server.Shutdown(ctx), + ) +} + +func view(v View) (sdkmetric.View, error) { + inst, err := instrument(v.Selector) + if err != nil { + return nil, err + } + + s, err := stream(v.Stream) + if err != nil { + return nil, err + } + return sdkmetric.NewView(inst, s), nil +} + +func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { + kind, err := instrumentKind(vs.InstrumentType) + if err != nil { + return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) + } + inst := sdkmetric.Instrument{ + Name: strOrEmpty(vs.InstrumentName), + Unit: strOrEmpty(vs.Unit), + Kind: kind, + Scope: instrumentation.Scope{ + Name: strOrEmpty(vs.MeterName), + Version: strOrEmpty(vs.MeterVersion), + SchemaURL: strOrEmpty(vs.MeterSchemaUrl), + }, + } + + if instrumentIsEmpty(inst) { + return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") + } + return inst, nil +} + +func stream(vs ViewStream) (sdkmetric.Stream, error) { + f, err := newIncludeExcludeFilter(vs.AttributeKeys) + if err != nil { + return sdkmetric.Stream{}, err + } + return sdkmetric.Stream{ + Name: strOrEmpty(vs.Name), + Description: strOrEmpty(vs.Description), + Aggregation: aggregation(vs.Aggregation), + AttributeFilter: f, + }, nil +} + +func aggregation(aggr *Aggregation) sdkmetric.Aggregation { + if aggr == nil { + 
return nil + } + + if aggr.Base2ExponentialBucketHistogram != nil { + return sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), + MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), + } + } + if aggr.Default != nil { + // TODO: Understand what to set here. + return nil + } + if aggr.Drop != nil { + return sdkmetric.AggregationDrop{} + } + if aggr.ExplicitBucketHistogram != nil { + return sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: aggr.ExplicitBucketHistogram.Boundaries, + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), + } + } + if aggr.LastValue != nil { + return sdkmetric.AggregationLastValue{} + } + if aggr.Sum != nil { + return sdkmetric.AggregationSum{} + } + return nil +} + +func instrumentKind(vsit *InstrumentType) (sdkmetric.InstrumentKind, error) { + if vsit == nil { + // Equivalent to instrumentKindUndefined. 
+ return instrumentKindUndefined, nil + } + + switch *vsit { + case InstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter, nil + case InstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter, nil + case InstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram, nil + case InstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter, nil + case InstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter, nil + case InstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge, nil + } + + return instrumentKindUndefined, errors.New("instrument_type: invalid value") +} + +func instrumentIsEmpty(i sdkmetric.Instrument) bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +func boolOrFalse(pBool *bool) bool { + if pBool == nil { + return false + } + return *pBool +} + +func int32OrZero(pInt *int) int32 { + if pInt == nil { + return 0 + } + i := *pInt + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + return int32(i) +} + +func strOrEmpty(pStr *string) string { + if pStr == nil { + return "" + } + return *pStr +} diff --git a/otelconf/x/metric_test.go b/otelconf/x/metric_test.go new file mode 100644 index 00000000000..80b33266375 --- /dev/null +++ b/otelconf/x/metric_test.go @@ -0,0 +1,1623 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + 
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + v1 "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func TestMeterProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider metric.MeterProvider + wantErr error + }{ + { + name: "no-meter-provider-configured", + wantProvider: noop.NewMeterProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProvider{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: newErrInvalid("must not specify multiple metric reader type"), + }, + { + name: "multiple-errors-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + MeterProvider: &MeterProvider{ + Readers: []MetricReader{ + { + Periodic: &PeriodicMetricReader{}, + Pull: &PullMetricReader{}, + }, + { + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: &ConsoleMetricExporter{}, + OTLPGrpc: &OTLPGrpcMetricExporter{}, + }, + }, + }, + }, + }, + }, + }, + wantProvider: noop.NewMeterProvider(), + wantErr: newErrInvalid("must not specify multiple metric reader type"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mp, shutdown, err := meterProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, mp) + assert.ErrorIs(t, err, tt.wantErr) + require.NoError(t, 
shutdown(t.Context())) + }) + } +} + +func TestMeterProviderOptions(t *testing.T) { + var calls int + srv := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + calls++ + })) + defer srv.Close() + + cfg := OpenTelemetryConfiguration{ + MeterProvider: &MeterProvider{ + Readers: []MetricReader{{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr(srv.URL), + }, + }, + }, + }}, + }, + } + + var buf bytes.Buffer + stdoutmetricExporter, err := stdoutmetric.New(stdoutmetric.WithWriter(&buf)) + require.NoError(t, err) + + res := resource.NewSchemaless(attribute.String("foo", "bar")) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithMeterProviderOptions(sdkmetric.WithReader(sdkmetric.NewPeriodicReader(stdoutmetricExporter))), + WithMeterProviderOptions(sdkmetric.WithResource(res)), + ) + require.NoError(t, err) + defer func() { + assert.NoError(t, sdk.Shutdown(t.Context())) + // The exporter, which we passed in as an extra option to NewSDK, + // should be wired up to the provider in addition to the + // configuration-based OTLP exporter. + assert.NotZero(t, buf) + assert.Equal(t, 1, calls) // flushed on shutdown + + // Options provided by WithMeterProviderOptions may be overridden + // by configuration, e.g. the resource is always defined via + // configuration. 
+ assert.NotContains(t, buf.String(), "foo") + }() + + counter, _ := sdk.MeterProvider().Meter("test").Int64Counter("counter") + counter.Add(t.Context(), 1) +} + +func TestReader(t *testing.T) { + consoleExporter, err := stdoutmetric.New( + stdoutmetric.WithPrettyPrint(), + ) + require.NoError(t, err) + ctx := t.Context() + otlpGRPCExporter, err := otlpmetricgrpc.New(ctx) + require.NoError(t, err) + otlpHTTPExporter, err := otlpmetrichttp.New(ctx) + require.NoError(t, err) + promExporter, err := otelprom.New() + require.NoError(t, err) + testCases := []struct { + name string + reader MetricReader + args any + wantErrT error + wantReader sdkmetric.Reader + }{ + { + name: "no reader", + wantErrT: newErrInvalid("no valid metric reader"), + }, + { + name: "pull/no-exporter", + reader: MetricReader{ + Pull: &PullMetricReader{}, + }, + wantErrT: newErrInvalid("no valid metric exporter"), + }, + { + name: "pull/prometheus-no-host", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("host must be specified"), + }, + { + name: "pull/prometheus-no-port", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + }, + }, + }, + }, + wantErrT: newErrInvalid("port must be specified"), + }, + { + name: "pull/prometheus", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes), + WithResourceConstantLabels: &IncludeExclude{ + Included: []string{"include"}, + Excluded: []string{"exclude"}, + }, + }, + }, + }, + }, + wantReader: readerWithServer{promExporter, nil}, + }, 
+ { + name: "pull/prometheus/invalid strategy", + reader: MetricReader{ + Pull: &PullMetricReader{ + Exporter: PullMetricExporter{ + PrometheusDevelopment: &ExperimentalPrometheusMetricExporter{ + Host: ptr("localhost"), + Port: ptr(0), + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategy("invalid-strategy")), + WithResourceConstantLabels: &IncludeExclude{ + Included: []string{"include"}, + Excluded: []string{"exclude"}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("translation strategy invalid"), + }, + { + name: "periodic/otlp-grpc-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-good-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-bad-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: 
PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "periodic/otlp-grpc-bad-client-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "periodic/otlp-grpc-bad-headerslist", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "periodic/otlp-grpc-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-exporter-socket-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + 
name: "periodic/otlp-grpc-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "periodic/otlp-grpc-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: 
ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceCumulative), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceLowMemory), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), + }, + { + name: "periodic/otlp-grpc-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: (*ExporterTemporalityPreference)(ptr("invalid")), + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported temporality preference \"invalid\""), + }, + { + name: "periodic/otlp-grpc-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPGrpc: &OTLPGrpcMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "periodic/otlp-http-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + 
Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-good-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-bad-ca-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("https://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "periodic/otlp-http-bad-client-certificate", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "periodic/otlp-http-bad-headerslist", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "periodic/otlp-http-exporter-with-path", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: 
PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-exporter-no-scheme", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-endpoint", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "periodic/otlp-http-none-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: 
"periodic/otlp-http-cumulative-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceCumulative), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-lowmemory-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceLowMemory), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-delta-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: ptr(ExporterTemporalityPreferenceDelta), + }, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), + }, + { + name: "periodic/otlp-http-invalid-temporality", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + TemporalityPreference: (*ExporterTemporalityPreference)(ptr("invalid")), + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported temporality preference 
\"invalid\""), + }, + { + name: "periodic/otlp-http-invalid-compression", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPHttp: &OTLPHttpMetricExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "periodic/no-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{}, + }, + }, + wantErrT: newErrInvalid("no valid metric exporter"), + }, + { + name: "periodic/console-exporter", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + Console: &ConsoleMetricExporter{}, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader(consoleExporter), + }, + { + name: "periodic/console-exporter-with-extra-options", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Interval: ptr(30_000), + Timeout: ptr(5_000), + Exporter: PushMetricExporter{ + Console: &ConsoleMetricExporter{}, + }, + }, + }, + wantReader: sdkmetric.NewPeriodicReader( + consoleExporter, + sdkmetric.WithInterval(30_000*time.Millisecond), + sdkmetric.WithTimeout(5_000*time.Millisecond), + ), + }, + { + name: "periodic/otlp_file", + reader: MetricReader{ + Periodic: &PeriodicMetricReader{ + Exporter: PushMetricExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileMetricExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("otlp_file/development"), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := metricReader(t.Context(), tt.reader) + require.ErrorIs(t, err, tt.wantErrT) + if tt.wantReader == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantReader), reflect.TypeOf(got)) + var fieldName string + switch reflect.TypeOf(tt.wantReader).String() { + case "*metric.PeriodicReader": + 
fieldName = "exporter" + case "otelconf.readerWithServer": + fieldName = "Reader" + default: + fieldName = "e" + } + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantReader)).FieldByName(fieldName).Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + require.NoError(t, got.Shutdown(t.Context())) + } + }) + } +} + +func TestView(t *testing.T) { + testCases := []struct { + name string + view View + args any + wantErr string + matchInstrument *sdkmetric.Instrument + wantStream sdkmetric.Stream + wantResult bool + }{ + { + name: "selector/invalid_type", + view: View{ + Selector: ViewSelector{ + InstrumentType: (*InstrumentType)(ptr("invalid_type")), + }, + }, + wantErr: "view_selector: instrument_type: invalid value", + }, + { + name: "selector/invalid_type", + view: View{ + Selector: ViewSelector{}, + }, + wantErr: "view_selector: empty selector not supporter", + }, + { + name: "all selectors match", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{Name: "test_name", Unit: "test_unit"}, + wantResult: true, + }, + { + name: "all selectors no match name", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: 
ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "not_match", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match unit", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "not_match", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match kind", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: (*InstrumentType)(ptr("histogram")), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter name", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: 
&sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "not_match", + Version: "test_meter_version", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter version", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "not_match", + SchemaURL: "test_schema_url", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "all selectors no match meter schema url", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + InstrumentType: ptr(InstrumentTypeCounter), + Unit: ptr("test_unit"), + MeterName: ptr("test_meter_name"), + MeterVersion: ptr("test_meter_version"), + MeterSchemaUrl: ptr("test_schema_url"), + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: "test_name", + Unit: "test_unit", + Kind: sdkmetric.InstrumentKindCounter, + Scope: instrumentation.Scope{ + Name: "test_meter_name", + Version: "test_meter_version", + SchemaURL: "not_match", + }, + }, + wantStream: sdkmetric.Stream{}, + wantResult: false, + }, + { + name: "with stream", + view: View{ + Selector: ViewSelector{ + InstrumentName: ptr("test_name"), + Unit: ptr("test_unit"), + }, + Stream: ViewStream{ + Name: ptr("new_name"), + Description: ptr("new_description"), + AttributeKeys: ptr(IncludeExclude{Included: []string{"foo", "bar"}}), + Aggregation: &Aggregation{Sum: make(SumAggregation)}, + }, + }, + matchInstrument: &sdkmetric.Instrument{ + Name: 
"test_name", + Description: "test_description", + Unit: "test_unit", + }, + wantStream: sdkmetric.Stream{ + Name: "new_name", + Description: "new_description", + Unit: "test_unit", + Aggregation: sdkmetric.AggregationSum{}, + }, + wantResult: true, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := view(tt.view) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + require.Nil(t, got) + } else { + require.NoError(t, err) + gotStream, gotResult := got(*tt.matchInstrument) + // Remove filter, since it cannot be compared + gotStream.AttributeFilter = nil + require.Equal(t, tt.wantStream, gotStream) + require.Equal(t, tt.wantResult, gotResult) + } + }) + } +} + +func TestInstrumentType(t *testing.T) { + testCases := []struct { + name string + instType *InstrumentType + wantErr error + wantKind sdkmetric.InstrumentKind + }{ + { + name: "nil", + wantKind: sdkmetric.InstrumentKind(0), + }, + { + name: "counter", + instType: ptr(InstrumentTypeCounter), + wantKind: sdkmetric.InstrumentKindCounter, + }, + { + name: "up_down_counter", + instType: ptr(InstrumentTypeUpDownCounter), + wantKind: sdkmetric.InstrumentKindUpDownCounter, + }, + { + name: "histogram", + instType: ptr(InstrumentTypeHistogram), + wantKind: sdkmetric.InstrumentKindHistogram, + }, + { + name: "observable_counter", + instType: ptr(InstrumentTypeObservableCounter), + wantKind: sdkmetric.InstrumentKindObservableCounter, + }, + { + name: "observable_up_down_counter", + instType: ptr(InstrumentTypeObservableUpDownCounter), + wantKind: sdkmetric.InstrumentKindObservableUpDownCounter, + }, + { + name: "observable_gauge", + instType: ptr(InstrumentTypeObservableGauge), + wantKind: sdkmetric.InstrumentKindObservableGauge, + }, + { + name: "invalid", + instType: (*InstrumentType)(ptr("invalid")), + wantErr: errors.New("instrument_type: invalid value"), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := 
instrumentKind(tt.instType) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr, err) + require.Zero(t, got) + } else { + require.NoError(t, err) + require.Equal(t, tt.wantKind, got) + } + }) + } +} + +func TestAggregation(t *testing.T) { + testCases := []struct { + name string + aggregation *Aggregation + wantAggregation sdkmetric.Aggregation + }{ + { + name: "nil", + wantAggregation: nil, + }, + { + name: "empty", + aggregation: &Aggregation{}, + wantAggregation: nil, + }, + { + name: "Base2ExponentialBucketHistogram empty", + aggregation: &Aggregation{ + Base2ExponentialBucketHistogram: &Base2ExponentialBucketHistogramAggregation{}, + }, + wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 0, + MaxScale: 0, + NoMinMax: true, + }, + }, + { + name: "Base2ExponentialBucketHistogram", + aggregation: &Aggregation{ + Base2ExponentialBucketHistogram: &Base2ExponentialBucketHistogramAggregation{ + MaxSize: ptr(2), + MaxScale: ptr(3), + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 2, + MaxScale: 3, + NoMinMax: false, + }, + }, + { + name: "Default", + aggregation: &Aggregation{ + Default: make(DefaultAggregation), + }, + wantAggregation: nil, + }, + { + name: "Drop", + aggregation: &Aggregation{ + Drop: make(DropAggregation), + }, + wantAggregation: sdkmetric.AggregationDrop{}, + }, + { + name: "ExplicitBucketHistogram empty", + aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{}, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: nil, + NoMinMax: true, + }, + }, + { + name: "ExplicitBucketHistogram", + aggregation: &Aggregation{ + ExplicitBucketHistogram: &ExplicitBucketHistogramAggregation{ + Boundaries: []float64{1, 2, 3}, + RecordMinMax: ptr(true), + }, + }, + wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1, 2, 3}, + NoMinMax: false, + }, + }, + { + name: 
"LastValue", + aggregation: &Aggregation{ + LastValue: make(LastValueAggregation), + }, + wantAggregation: sdkmetric.AggregationLastValue{}, + }, + { + name: "Sum", + aggregation: &Aggregation{ + Sum: make(SumAggregation), + }, + wantAggregation: sdkmetric.AggregationSum{}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := aggregation(tt.aggregation) + require.Equal(t, tt.wantAggregation, got) + }) + } +} + +func TestNewIncludeExcludeFilter(t *testing.T) { + testCases := []struct { + name string + attributeKeys *IncludeExclude + wantPass []string + wantFail []string + }{ + { + name: "empty", + attributeKeys: nil, + wantPass: []string{"foo", "bar"}, + wantFail: nil, + }, + { + name: "filter-with-include", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"foo"}, + }), + wantPass: []string{"foo"}, + wantFail: []string{"bar"}, + }, + { + name: "filter-with-exclude", + attributeKeys: ptr(IncludeExclude{ + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, + }, + { + name: "filter-with-include-and-exclude", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"bar"}, + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := newIncludeExcludeFilter(tt.attributeKeys) + require.NoError(t, err) + for _, pass := range tt.wantPass { + require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) + } + for _, fail := range tt.wantFail { + require.False(t, got(attribute.KeyValue{Key: attribute.Key(fail), Value: attribute.StringValue("")})) + } + }) + } +} + +func TestNewIncludeExcludeFilterError(t *testing.T) { + _, err := newIncludeExcludeFilter(ptr(IncludeExclude{ + Included: []string{"foo"}, + Excluded: []string{"foo"}, + })) + require.Equal(t, fmt.Errorf("attribute cannot be in both include and exclude list: foo"), 
err) +} + +func TestPrometheusReaderOpts(t *testing.T) { + testCases := []struct { + name string + cfg ExperimentalPrometheusMetricExporter + wantOptions int + }{ + { + name: "no options", + cfg: ExperimentalPrometheusMetricExporter{}, + wantOptions: 0, + }, + { + name: "all set", + cfg: ExperimentalPrometheusMetricExporter{ + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithoutSuffixes), + WithResourceConstantLabels: &IncludeExclude{}, + }, + wantOptions: 3, + }, + { + name: "all set false", + cfg: ExperimentalPrometheusMetricExporter{ + WithoutScopeInfo: ptr(false), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), + WithResourceConstantLabels: &IncludeExclude{}, + }, + wantOptions: 2, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + opts, err := prometheusReaderOpts(&tt.cfg) + require.NoError(t, err) + require.Len(t, opts, tt.wantOptions) + }) + } +} + +func TestPrometheusIPv6(t *testing.T) { + tests := []struct { + name string + host string + }{ + { + name: "IPv6", + host: "::1", + }, + { + name: "[IPv6]", + host: "[::1]", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + port := 0 + cfg := ExperimentalPrometheusMetricExporter{ + Host: &tt.host, + Port: &port, + WithoutScopeInfo: ptr(true), + TranslationStrategy: ptr(ExperimentalPrometheusTranslationStrategyUnderscoreEscapingWithSuffixes), + WithResourceConstantLabels: &IncludeExclude{}, + } + + rs, err := prometheusReader(t.Context(), &cfg) + t.Cleanup(func() { + //nolint:usetesting // required to avoid getting a canceled context at cleanup. 
+ require.NoError(t, rs.Shutdown(context.Background())) + }) + require.NoError(t, err) + + hServ := rs.(readerWithServer).server + assert.True(t, strings.HasPrefix(hServ.Addr, "[::1]:")) + + resp, err := http.DefaultClient.Get("http://" + hServ.Addr + "/metrics") + t.Cleanup(func() { + require.NoError(t, resp.Body.Close()) + }) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) + } +} + +func Test_otlpGRPCMetricExporter(t *testing.T) { + if runtime.GOOS == "windows" { + // TODO (#7446): Fix the flakiness on Windows. + t.Skip("Test is flaky on Windows.") + } + type args struct { + ctx context.Context + otlpConfig *OTLPGrpcMetricExporter + } + tests := []struct { + name string + args args + grpcServerOpts func() ([]grpc.ServerOption, error) + }{ + { + name: "no TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + Insecure: ptr(true), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + return []grpc.ServerOption{}, nil + }, + }, + { + name: "with TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + CaFile: ptr("testdata/server-certs/server.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + tlsCreds, err := credentials.NewServerTLSFromFile("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + { + name: "with TLS config and client key", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcMetricExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + 
CaFile: ptr("testdata/server-certs/server.crt"), + KeyFile: ptr("testdata/client-certs/client.key"), + CertFile: ptr("testdata/client-certs/client.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + cert, err := tls.LoadX509KeyPair("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + if err != nil { + return nil, err + } + caCert, err := os.ReadFile("testdata/ca.crt") + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + tlsCreds := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n, err := net.Listen("tcp4", "localhost:0") + require.NoError(t, err) + + // We need to manually construct the endpoint using the port on which the server is listening. + // + // n.Addr() always returns 127.0.0.1 instead of localhost. + // But our certificate is created with CN as 'localhost', not '127.0.0.1'. + // So we have to manually form the endpoint as "localhost:". + _, port, err := net.SplitHostPort(n.Addr().String()) + require.NoError(t, err) + tt.args.otlpConfig.Endpoint = ptr("localhost:" + port) + + serverOpts, err := tt.grpcServerOpts() + require.NoError(t, err) + + startGRPCMetricCollector(t, n, serverOpts) + + exporter, err := otlpGRPCMetricExporter(tt.args.ctx, tt.args.otlpConfig) + require.NoError(t, err) + + res, err := resource.New(t.Context()) + require.NoError(t, err) + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.NoError(collect, exporter.Export(context.Background(), &metricdata.ResourceMetrics{ //nolint:usetesting // required to avoid getting a canceled context. 
+ Resource: res, + ScopeMetrics: []metricdata.ScopeMetrics{ + { + Metrics: []metricdata.Metrics{ + { + Name: "test-metric", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Value: 1, + }, + }, + }, + }, + }, + }, + }, + })) + }, 10*time.Second, 1*time.Second) + }) + } +} + +// grpcMetricCollector is an OTLP gRPC server that collects all requests it receives. +type grpcMetricCollector struct { + v1.UnimplementedMetricsServiceServer +} + +var _ v1.MetricsServiceServer = (*grpcMetricCollector)(nil) + +// startGRPCMetricCollector returns a *grpcMetricCollector that is listening at the provided +// endpoint. +// +// If endpoint is an empty string, the returned collector will be listening on +// the localhost interface at an OS chosen port. +func startGRPCMetricCollector(t *testing.T, listener net.Listener, serverOptions []grpc.ServerOption) { + srv := grpc.NewServer(serverOptions...) + c := &grpcMetricCollector{} + + v1.RegisterMetricsServiceServer(srv, c) + + errCh := make(chan error, 1) + go func() { errCh <- srv.Serve(listener) }() + + t.Cleanup(func() { + srv.GracefulStop() + if err := <-errCh; err != nil && !errors.Is(err, grpc.ErrServerStopped) { + assert.NoError(t, err) + } + }) +} + +// Export handles the export req. 
+func (*grpcMetricCollector) Export( + _ context.Context, + _ *v1.ExportMetricsServiceRequest, +) (*v1.ExportMetricsServiceResponse, error) { + return &v1.ExportMetricsServiceResponse{}, nil +} diff --git a/otelconf/x/propagator.go b/otelconf/x/propagator.go new file mode 100644 index 00000000000..dee4db06649 --- /dev/null +++ b/otelconf/x/propagator.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "strings" + + "go.opentelemetry.io/otel/propagation" + "golang.org/x/exp/maps" + + "go.opentelemetry.io/contrib/propagators/autoprop" +) + +func newPropagator(p *Propagator) (propagation.TextMapPropagator, error) { + if p == nil { + return propagation.NewCompositeTextMapPropagator(), nil + } + + n := len(p.Composite) + if n == 0 && p.CompositeList == nil { + return propagation.NewCompositeTextMapPropagator(), nil + } + + names := map[string]struct{}{} + for _, propagator := range p.Composite { + if propagator.B3 != nil { + names["b3"] = struct{}{} + } + if propagator.B3Multi != nil { + names["b3multi"] = struct{}{} + } + if propagator.Baggage != nil { + names["baggage"] = struct{}{} + } + if propagator.Jaeger != nil { + names["jaeger"] = struct{}{} + } + if propagator.Ottrace != nil { + names["ottrace"] = struct{}{} + } + if propagator.Tracecontext != nil { + names["tracecontext"] = struct{}{} + } + } + + if p.CompositeList != nil { + for _, v := range strings.Split(*p.CompositeList, ",") { + names[v] = struct{}{} + } + } + + if len(names) == 0 { + return autoprop.NewTextMapPropagator(), nil + } + + return autoprop.TextMapPropagator(maps.Keys(names)...) 
+} diff --git a/otelconf/x/propagator_test.go b/otelconf/x/propagator_test.go new file mode 100644 index 00000000000..2d882c8cedc --- /dev/null +++ b/otelconf/x/propagator_test.go @@ -0,0 +1,178 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPropagator(t *testing.T) { + tests := []struct { + name string + cfg *Propagator + want []string + wantErr bool + errMsg string + }{ + { + name: "nil propagator config", + cfg: nil, + want: []string{}, + wantErr: false, + }, + { + name: "valid tracecontext", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + Tracecontext: TraceContextPropagator{}, + }, + }, + }, + want: []string{"traceparent", "tracestate"}, + wantErr: false, + }, + { + name: "valid baggage", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + Baggage: BaggagePropagator{}, + }, + }, + }, + want: []string{"baggage"}, + wantErr: false, + }, + { + name: "valid b3", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + B3: B3Propagator{}, + }, + }, + }, + want: []string{"x-b3-traceid", "x-b3-spanid", "x-b3-sampled", "x-b3-flags"}, + wantErr: false, + }, + { + name: "valid b3multi", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + B3Multi: B3MultiPropagator{}, + }, + }, + }, + want: []string{"x-b3-traceid", "x-b3-spanid", "x-b3-sampled", "x-b3-flags"}, + wantErr: false, + }, + { + name: "valid jaeger", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + Jaeger: JaegerPropagator{}, + }, + }, + }, + want: []string{"uber-trace-id"}, + wantErr: false, + }, + { + name: "valid ottrace", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + Ottrace: OpenTracingPropagator{}, + }, + }, + }, + want: []string{"ot-tracer-traceid", "ot-tracer-spanid", "ot-tracer-sampled"}, + wantErr: false, + }, + { + name: "multiple propagators", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + { + 
Tracecontext: TraceContextPropagator{}, + }, + { + Baggage: BaggagePropagator{}, + }, + { + B3: B3Propagator{}, + }, + }, + }, + want: []string{"tracestate", "baggage", "x-b3-traceid", "x-b3-spanid", "x-b3-sampled", "x-b3-flags", "traceparent"}, + wantErr: false, + }, + { + name: "empty composite", + cfg: &Propagator{ + Composite: []TextMapPropagator{ + {}, + }, + }, + want: []string{"tracestate", "baggage", "traceparent"}, + wantErr: false, + }, + { + name: "multiple propagators via composite_list", + cfg: &Propagator{ + CompositeList: ptr("tracecontext,baggage,b3"), + }, + want: []string{"tracestate", "baggage", "x-b3-traceid", "x-b3-spanid", "x-b3-sampled", "x-b3-flags", "traceparent"}, + wantErr: false, + }, + { + name: "valid xray", + cfg: &Propagator{ + CompositeList: ptr("xray"), + }, + want: []string{"X-Amzn-Trace-Id"}, + wantErr: false, + }, + { + name: "empty propagator name", + cfg: &Propagator{ + CompositeList: ptr(""), + }, + want: []string{}, + wantErr: true, + errMsg: "unknown propagator", + }, + { + name: "unsupported propagator", + cfg: &Propagator{ + CompositeList: ptr("random-garbage,baggage,b3"), + }, + want: []string{}, + wantErr: true, + errMsg: "unknown propagator", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newPropagator(tt.cfg) + if tt.wantErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errMsg) + return + } + assert.NoError(t, err) + slices.Sort(tt.want) + gotFields := got.Fields() + slices.Sort(gotFields) + assert.Equal(t, tt.want, gotFields) + }) + } +} diff --git a/otelconf/x/resource.go b/otelconf/x/resource.go new file mode 100644 index 00000000000..e950ba48f7a --- /dev/null +++ b/otelconf/x/resource.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + + 
"go.opentelemetry.io/contrib/otelconf/internal/kv" +) + +func resourceOpts(detectors []ExperimentalResourceDetector) []resource.Option { + opts := []resource.Option{} + for _, d := range detectors { + if d.Container != nil { + opts = append(opts, resource.WithContainer()) + } + if d.Host != nil { + opts = append(opts, resource.WithHost(), resource.WithHostID()) + } + if d.Process != nil { + opts = append(opts, resource.WithProcess()) + } + // TODO: implement service: + // Waiting on https://github.com/open-telemetry/opentelemetry-go/pull/7642 + } + return opts +} + +func newResource(r *Resource) (*resource.Resource, error) { + if r == nil { + return resource.Default(), nil + } + + attrs := make([]attribute.KeyValue, 0, len(r.Attributes)) + for _, v := range r.Attributes { + attrs = append(attrs, kv.FromNameValue(v.Name, v.Value)) + } + + var schema string + if r.SchemaUrl != nil { + schema = *r.SchemaUrl + } + opts := []resource.Option{ + resource.WithAttributes(attrs...), + resource.WithSchemaURL(schema), + } + + if r.DetectionDevelopment != nil { + opts = append(opts, resourceOpts(r.DetectionDevelopment.Detectors)...) + } + + return resource.New(context.Background(), opts...) 
+} diff --git a/otelconf/x/resource_test.go b/otelconf/x/resource_test.go new file mode 100644 index 00000000000..4de673c0aa2 --- /dev/null +++ b/otelconf/x/resource_test.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.39.0" +) + +func TestNewResource(t *testing.T) { + tests := []struct { + name string + config *Resource + wantResource *resource.Resource + wantErrT error + }{ + { + name: "no-resource-configuration", + wantResource: resource.Default(), + }, + { + name: "resource-no-attributes", + config: &Resource{}, + wantResource: resource.NewSchemaless(), + }, + { + name: "resource-with-schema", + config: &Resource{ + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL), + }, + { + name: "resource-with-attributes", + config: &Resource{ + Attributes: []AttributeNameValue{ + {Name: string(semconv.ServiceNameKey), Value: "service-a"}, + }, + }, + wantResource: resource.NewWithAttributes("", + semconv.ServiceName("service-a"), + ), + }, + { + name: "resource-with-attributes-and-schema", + config: &Resource{ + Attributes: []AttributeNameValue{ + {Name: string(semconv.ServiceNameKey), Value: "service-a"}, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + ), + }, + { + name: "resource-with-additional-attributes-and-schema", + config: &Resource{ + Attributes: []AttributeNameValue{ + {Name: string(semconv.ServiceNameKey), Value: "service-a"}, + {Name: "attr-bool", Value: true}, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + 
attribute.Bool("attr-bool", true)), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newResource(tt.config) + require.ErrorIs(t, tt.wantErrT, err) + assert.Equal(t, tt.wantResource, got) + }) + } +} diff --git a/otelconf/x/trace.go b/otelconf/x/trace.go new file mode 100644 index 00000000000..c3f3ec6cf80 --- /dev/null +++ b/otelconf/x/trace.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/contrib/otelconf/x" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/contrib/otelconf/internal/tls" +) + +var errInvalidSamplerConfiguration = newErrInvalid("sampler configuration") + +func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.TracerProvider == nil { + return noop.NewTracerProvider(), noopShutdown, nil + } + + opts := append(cfg.tracerProviderOptions, sdktrace.WithResource(res)) + + var errs []error + for _, processor := range cfg.opentelemetryConfig.TracerProvider.Processors { + sp, err := spanProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdktrace.WithSpanProcessor(sp)) + } else { + errs = append(errs, err) + } + } + if s, err := sampler(cfg.opentelemetryConfig.TracerProvider.Sampler); err == nil { + opts = append(opts, sdktrace.WithSampler(s)) + } else { + errs = append(errs, err) + } + if len(errs) > 0 { + return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) 
+ } + tp := sdktrace.NewTracerProvider(opts...) + return tp, tp.Shutdown, nil +} + +func parentBasedSampler(s *ParentBasedSampler) (sdktrace.Sampler, error) { + var rootSampler sdktrace.Sampler + var opts []sdktrace.ParentBasedSamplerOption + var errs []error + var err error + + if s.Root == nil { + rootSampler = sdktrace.AlwaysSample() + } else { + rootSampler, err = sampler(s.Root) + if err != nil { + errs = append(errs, err) + } + } + if s.RemoteParentSampled != nil { + remoteParentSampler, err := sampler(s.RemoteParentSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithRemoteParentSampled(remoteParentSampler)) + } + } + if s.RemoteParentNotSampled != nil { + remoteParentNotSampler, err := sampler(s.RemoteParentNotSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithRemoteParentNotSampled(remoteParentNotSampler)) + } + } + if s.LocalParentSampled != nil { + localParentSampler, err := sampler(s.LocalParentSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithLocalParentSampled(localParentSampler)) + } + } + if s.LocalParentNotSampled != nil { + localParentNotSampler, err := sampler(s.LocalParentNotSampled) + if err != nil { + errs = append(errs, err) + } else { + opts = append(opts, sdktrace.WithLocalParentNotSampled(localParentNotSampler)) + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + return sdktrace.ParentBased(rootSampler, opts...), nil +} + +func sampler(s *Sampler) (sdktrace.Sampler, error) { + if s == nil { + // If omitted, parent based sampler with a root of always_on is used. 
+ return sdktrace.ParentBased(sdktrace.AlwaysSample()), nil + } + if s.ParentBased != nil { + return parentBasedSampler(s.ParentBased) + } + if s.AlwaysOff != nil { + return sdktrace.NeverSample(), nil + } + if s.AlwaysOn != nil { + return sdktrace.AlwaysSample(), nil + } + if s.TraceIDRatioBased != nil { + if s.TraceIDRatioBased.Ratio == nil { + return sdktrace.TraceIDRatioBased(1), nil + } + return sdktrace.TraceIDRatioBased(*s.TraceIDRatioBased.Ratio), nil + } + return nil, errInvalidSamplerConfiguration +} + +func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { + exportersConfigured := 0 + var exportFunc func() (sdktrace.SpanExporter, error) + + if exporter.Console != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + } + } + if exporter.OTLPHttp != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return otlpHTTPSpanExporter(ctx, exporter.OTLPHttp) + } + } + if exporter.OTLPGrpc != nil { + exportersConfigured++ + exportFunc = func() (sdktrace.SpanExporter, error) { + return otlpGRPCSpanExporter(ctx, exporter.OTLPGrpc) + } + } + if exporter.OTLPFileDevelopment != nil { + // TODO: implement file exporter https://github.com/open-telemetry/opentelemetry-go/issues/5408 + return nil, newErrInvalid("otlp_file/development") + } + + if exportersConfigured > 1 { + return nil, newErrInvalid("must not specify multiple exporters") + } + + if exportFunc != nil { + return exportFunc() + } + return nil, newErrInvalid("no valid span exporter") +} + +func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, newErrInvalid("must not specify multiple span processor type") + } + if processor.Batch != nil { + exp, err := spanExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, 
err + } + return batchSpanProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := spanExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdktrace.NewSimpleSpanProcessor(exp), nil + } + return nil, newErrInvalid("unsupported span processor type, must be one of simple or batch") +} + +func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLPGrpcExporter) (sdktrace.SpanExporter, error) { + var opts []otlptracegrpc.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err) + } + // ParseRequestURI leaves the Host field empty when no + // scheme is specified (i.e. localhost:4317). This check is + // here to support the case where a user may not specify a + // scheme. The code does its best effort here by using + // otlpConfig.Endpoint as-is in that case. + if u.Host != "" { + opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) + } else { + opts = append(opts, otlptracegrpc.WithEndpoint(*otlpConfig.Endpoint)) + } + + if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Tls != nil && otlpConfig.Tls.Insecure != nil && *otlpConfig.Tls.Insecure) { + opts = append(opts, otlptracegrpc.WithInsecure()) + } + } + + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression)) + case compressionNone: + // none requires no options + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression)) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts 
= append(opts, otlptracegrpc.WithHeaders(headersConfig)) + } + + if otlpConfig.Tls != nil && (otlpConfig.Tls.CaFile != nil || otlpConfig.Tls.CertFile != nil || otlpConfig.Tls.KeyFile != nil) { + tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, otlpConfig.Tls.KeyFile) + if err != nil { + return nil, errors.Join(newErrInvalid("tls configuration"), err) + } + opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) + } + + return otlptracegrpc.New(ctx, opts...) +} + +func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLPHttpExporter) (sdktrace.SpanExporter, error) { + var opts []otlptracehttp.Option + + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) + if err != nil { + return nil, errors.Join(newErrInvalid("endpoint parsing failed"), err) + } + opts = append(opts, otlptracehttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlptracehttp.WithInsecure()) + } + if u.Path != "" { + opts = append(opts, otlptracehttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression)) + default: + return nil, newErrInvalid(fmt.Sprintf("unsupported compression %q", *otlpConfig.Compression)) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) + if err != nil { + return nil, err + } + if len(headersConfig) > 0 { + opts = append(opts, otlptracehttp.WithHeaders(headersConfig)) + } + + if otlpConfig.Tls != nil { + tlsConfig, err := tls.CreateConfig(otlpConfig.Tls.CaFile, otlpConfig.Tls.CertFile, 
otlpConfig.Tls.KeyFile) + if err != nil { + return nil, errors.Join(newErrInvalid("tls configuration"), err) + } + opts = append(opts, otlptracehttp.WithTLSClientConfig(tlsConfig)) + } + + return otlptracehttp.New(ctx, opts...) +} + +func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) { + var opts []sdktrace.BatchSpanProcessorOption + if err := validateBatchSpanProcessor(bsp); err != nil { + return nil, err + } + if bsp.ExportTimeout != nil { + opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout))) + } + if bsp.MaxExportBatchSize != nil { + opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize)) + } + if bsp.MaxQueueSize != nil { + opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize)) + } + if bsp.ScheduleDelay != nil { + opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay))) + } + return sdktrace.NewBatchSpanProcessor(exp, opts...), nil +} diff --git a/otelconf/x/trace_test.go b/otelconf/x/trace_test.go new file mode 100644 index 00000000000..d293e0e5409 --- /dev/null +++ b/otelconf/x/trace_test.go @@ -0,0 +1,1065 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" + 
"go.opentelemetry.io/otel/trace/noop" + v1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func TestTracerProvider(t *testing.T) { + tests := []struct { + name string + cfg configOptions + wantProvider trace.TracerProvider + wantErr error + }{ + { + name: "no-tracer-provider-configured", + wantProvider: noop.NewTracerProvider(), + }, + { + name: "error-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{ + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{}, + Simple: &SimpleSpanProcessor{}, + }, + }, + }, + }, + }, + wantProvider: noop.NewTracerProvider(), + wantErr: newErrInvalid("must not specify multiple span processor type"), + }, + { + name: "multiple-errors-in-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{ + Processors: []SpanProcessor{ + { + Batch: &BatchSpanProcessor{}, + Simple: &SimpleSpanProcessor{}, + }, + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + }, + }, + }, + }, + wantProvider: noop.NewTracerProvider(), + wantErr: newErrInvalid("must not specify multiple exporters"), + }, + { + name: "invalid-sampler-config", + cfg: configOptions{ + opentelemetryConfig: OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{ + Processors: []SpanProcessor{ + { + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + }, + Sampler: &Sampler{}, + }, + }, + }, + wantProvider: noop.NewTracerProvider(), + wantErr: errInvalidSamplerConfiguration, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tp, shutdown, err := tracerProvider(tt.cfg, resource.Default()) + require.Equal(t, tt.wantProvider, tp) + assert.ErrorIs(t, err, tt.wantErr) + require.NoError(t, 
shutdown(t.Context())) + }) + } +} + +func TestTracerProviderOptions(t *testing.T) { + var calls int + srv := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + calls++ + })) + defer srv.Close() + + cfg := OpenTelemetryConfiguration{ + TracerProvider: &TracerProvider{ + Processors: []SpanProcessor{{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(srv.URL), + }, + }, + }, + }}, + }, + } + + var buf bytes.Buffer + stdouttraceExporter, err := stdouttrace.New(stdouttrace.WithWriter(&buf)) + require.NoError(t, err) + + res := resource.NewSchemaless(attribute.String("foo", "bar")) + sdk, err := NewSDK( + WithOpenTelemetryConfiguration(cfg), + WithTracerProviderOptions(sdktrace.WithSyncer(stdouttraceExporter)), + WithTracerProviderOptions(sdktrace.WithResource(res)), + ) + require.NoError(t, err) + defer func() { + assert.NoError(t, sdk.Shutdown(t.Context())) + }() + + // The exporter, which we passed in as an extra option to NewSDK, + // should be wired up to the provider in addition to the + // configuration-based OTLP exporter. + tracer := sdk.TracerProvider().Tracer("test") + _, span := tracer.Start(t.Context(), "span") + span.End() + assert.NotZero(t, buf) + assert.Equal(t, 1, calls) + // Options provided by WithTracerProviderOptions may be overridden + // by configuration, e.g. the resource is always defined via + // configuration.
+ assert.NotContains(t, buf.String(), "foo") +} + +func TestSpanProcessor(t *testing.T) { + consoleExporter, err := stdouttrace.New( + stdouttrace.WithPrettyPrint(), + ) + require.NoError(t, err) + ctx := t.Context() + otlpGRPCExporter, err := otlptracegrpc.New(ctx) + require.NoError(t, err) + otlpHTTPExporter, err := otlptracehttp.New(ctx) + require.NoError(t, err) + testCases := []struct { + name string + processor SpanProcessor + args any + wantErrT error + wantProcessor sdktrace.SpanProcessor + }{ + { + name: "no processor", + wantErrT: newErrInvalid("unsupported span processor type, must be one of simple or batch"), + }, + { + name: "multiple processor types", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{}, + }, + Simple: &SimpleSpanProcessor{}, + }, + wantErrT: newErrInvalid("must not specify multiple span processor type"), + }, + { + name: "batch processor invalid exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{}, + }, + }, + wantErrT: newErrInvalid("no valid span exporter"), + }, + { + name: "batch processor invalid batch size console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(-1), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErrT: newErrGreaterThanZero("max_export_batch_size"), + }, + { + name: "batch processor invalid export timeout console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + ExportTimeout: ptr(-2), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErrT: newErrGreaterOrEqualZero("export_timeout"), + }, + { + name: "batch processor invalid queue size console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxQueueSize: ptr(-3), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErrT: newErrGreaterThanZero("max_queue_size"), + }, + { + name: "batch processor invalid schedule delay 
console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + ScheduleDelay: ptr(-4), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantErrT: newErrGreaterOrEqualZero("schedule_delay"), + }, + { + name: "batch processor with multiple exporters", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + OTLPHttp: &OTLPHttpExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("must not specify multiple exporters"), + }, + { + name: "batch processor console exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(consoleExporter), + }, + { + name: "batch/otlp-grpc-exporter-no-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("http://localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-exporter-socket-endpoint", + processor: SpanProcessor{ + Batch: 
&BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("unix:collector.sock"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-good-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-bad-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-grpc-bad-client-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &GrpcTls{ + KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-grpc-bad-headerslist", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: 
ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "batch/otlp-grpc-exporter-no-scheme", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), + }, + { + name: "batch/otlp-grpc-invalid-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "batch/otlp-grpc-invalid-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPGrpc: &OTLPGrpcExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "batch/otlp-http-exporter", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: 
[]NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-good-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("testdata", "ca.crt")), + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-bad-ca-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-http-bad-client-certificate", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Tls: &HttpTls{ + KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("tls configuration"), + }, + { + name: "batch/otlp-http-bad-headerslist", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4317"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + HeadersList: ptr("==="), + }, + }, + }, + }, + wantErrT: newErrInvalid("invalid headers_list"), + }, + { + name: "batch/otlp-http-exporter-with-path", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + 
ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("http://localhost:4318/path/123"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-exporter-no-scheme", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-endpoint", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr(" "), + Compression: ptr("gzip"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("endpoint parsing failed"), + }, + { + name: "batch/otlp-http-none-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + 
ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("none"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), + }, + { + name: "batch/otlp-http-invalid-compression", + processor: SpanProcessor{ + Batch: &BatchSpanProcessor{ + MaxExportBatchSize: ptr(1), + ExportTimeout: ptr(0), + MaxQueueSize: ptr(1), + ScheduleDelay: ptr(0), + Exporter: SpanExporter{ + OTLPHttp: &OTLPHttpExporter{ + Endpoint: ptr("localhost:4318"), + Compression: ptr("invalid"), + Timeout: ptr(1000), + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + }, + }, + wantErrT: newErrInvalid("unsupported compression \"invalid\""), + }, + { + name: "simple/no-exporter", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{}, + }, + }, + wantErrT: newErrInvalid("no valid span exporter"), + }, + { + name: "simple/console-exporter", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + }, + }, + }, + wantProcessor: sdktrace.NewSimpleSpanProcessor(consoleExporter), + }, + { + name: "simple/otlp_file", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + OTLPFileDevelopment: &ExperimentalOTLPFileExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("otlp_file/development"), + }, + { + name: "simple/multiple", + processor: SpanProcessor{ + Simple: &SimpleSpanProcessor{ + Exporter: SpanExporter{ + Console: ConsoleExporter{}, + OTLPGrpc: &OTLPGrpcExporter{}, + }, + }, + }, + wantErrT: newErrInvalid("must not specify multiple exporters"), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, err := spanProcessor(t.Context(), tt.processor) + 
require.ErrorIs(t, err, tt.wantErrT) + if tt.wantProcessor == nil { + require.Nil(t, got) + } else { + require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) + var fieldName string + switch reflect.TypeOf(tt.wantProcessor).String() { + case "*trace.simpleSpanProcessor": + fieldName = "exporter" + default: + fieldName = "e" + } + wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName(fieldName).Elem().Type() + gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() + require.Equal(t, wantExporterType.String(), gotExporterType.String()) + } + }) + } +} + +func TestSampler(t *testing.T) { + for _, tt := range []struct { + name string + sampler *Sampler + wantSampler sdktrace.Sampler + wantError error + }{ + { + name: "no sampler configuration, return default", + sampler: nil, + wantSampler: sdktrace.ParentBased(sdktrace.AlwaysSample()), + }, + { + name: "invalid sampler configuration, return error", + sampler: &Sampler{}, + wantSampler: nil, + wantError: errInvalidSamplerConfiguration, + }, + { + name: "sampler configuration always on", + sampler: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + wantSampler: sdktrace.AlwaysSample(), + }, + { + name: "sampler configuration always off", + sampler: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + wantSampler: sdktrace.NeverSample(), + }, + { + name: "sampler configuration trace ID ratio", + sampler: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.54), + }, + }, + wantSampler: sdktrace.TraceIDRatioBased(0.54), + }, + { + name: "sampler configuration trace ID ratio no ratio", + sampler: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{}, + }, + wantSampler: sdktrace.TraceIDRatioBased(1), + }, + { + name: "sampler configuration parent based no options", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{}, + }, + wantSampler: sdktrace.ParentBased(sdktrace.AlwaysSample()), + }, + { + name: "sampler 
configuration parent based many options", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + Root: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + RemoteParentNotSampled: &Sampler{ + AlwaysOn: AlwaysOnSampler{}, + }, + RemoteParentSampled: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.009), + }, + }, + LocalParentNotSampled: &Sampler{ + AlwaysOff: AlwaysOffSampler{}, + }, + LocalParentSampled: &Sampler{ + TraceIDRatioBased: &TraceIDRatioBasedSampler{ + Ratio: ptr(0.05), + }, + }, + }, + }, + wantSampler: sdktrace.ParentBased( + sdktrace.NeverSample(), + sdktrace.WithLocalParentNotSampled(sdktrace.NeverSample()), + sdktrace.WithLocalParentSampled(sdktrace.TraceIDRatioBased(0.05)), + sdktrace.WithRemoteParentNotSampled(sdktrace.AlwaysSample()), + sdktrace.WithRemoteParentSampled(sdktrace.TraceIDRatioBased(0.009)), + ), + }, + { + name: "sampler configuration with many errors", + sampler: &Sampler{ + ParentBased: &ParentBasedSampler{ + Root: &Sampler{}, + RemoteParentNotSampled: &Sampler{}, + RemoteParentSampled: &Sampler{}, + LocalParentNotSampled: &Sampler{}, + LocalParentSampled: &Sampler{}, + }, + }, + wantError: errors.Join( + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + errInvalidSamplerConfiguration, + ), + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := sampler(tt.sampler) + if tt.wantError != nil { + require.Error(t, err) + require.EqualError(t, err, tt.wantError.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tt.wantSampler, got) + }) + } +} + +func Test_otlpGRPCTraceExporter(t *testing.T) { + type args struct { + ctx context.Context + otlpConfig *OTLPGrpcExporter + } + tests := []struct { + name string + args args + grpcServerOpts func() ([]grpc.ServerOption, error) + }{ + { + name: "no TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + 
Timeout: ptr(5000), + Tls: &GrpcTls{ + Insecure: ptr(true), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + return []grpc.ServerOption{}, nil + }, + }, + { + name: "with TLS config", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + CaFile: ptr("testdata/server-certs/server.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + tlsCreds, err := credentials.NewServerTLSFromFile("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + if err != nil { + return nil, err + } + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + { + name: "with TLS config and client key", + args: args{ + ctx: t.Context(), + otlpConfig: &OTLPGrpcExporter{ + Compression: ptr("gzip"), + Timeout: ptr(5000), + Tls: &GrpcTls{ + CaFile: ptr("testdata/server-certs/server.crt"), + KeyFile: ptr("testdata/client-certs/client.key"), + CertFile: ptr("testdata/client-certs/client.crt"), + }, + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, + }, + }, + }, + grpcServerOpts: func() ([]grpc.ServerOption, error) { + opts := []grpc.ServerOption{} + cert, err := tls.LoadX509KeyPair("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + if err != nil { + return nil, err + } + caCert, err := os.ReadFile("testdata/ca.crt") + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + tlsCreds := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + opts = append(opts, grpc.Creds(tlsCreds)) + return opts, nil + }, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + n, err := net.Listen("tcp4", "localhost:0") + require.NoError(t, err) + + // We need to manually construct the endpoint using the port on which the server is listening. + // + // n.Addr() always returns 127.0.0.1 instead of localhost. + // But our certificate is created with CN as 'localhost', not '127.0.0.1'. + // So we have to manually form the endpoint as "localhost:<port>". + _, port, err := net.SplitHostPort(n.Addr().String()) + require.NoError(t, err) + tt.args.otlpConfig.Endpoint = ptr("localhost:" + port) + + serverOpts, err := tt.grpcServerOpts() + require.NoError(t, err) + + startGRPCTraceCollector(t, n, serverOpts) + + exporter, err := otlpGRPCSpanExporter(tt.args.ctx, tt.args.otlpConfig) + require.NoError(t, err) + + input := tracetest.SpanStubs{ + { + Name: "test-span", + }, + } + + assert.EventuallyWithT(t, func(collect *assert.CollectT) { + assert.NoError(collect, exporter.ExportSpans(context.Background(), input.Snapshots())) //nolint:usetesting // required to avoid getting a canceled context. + }, 10*time.Second, 1*time.Second) + }) + } +} + +// grpcTraceCollector is an OTLP gRPC server that collects all requests it receives. +type grpcTraceCollector struct { + v1.UnimplementedTraceServiceServer +} + +var _ v1.TraceServiceServer = (*grpcTraceCollector)(nil) + +// startGRPCTraceCollector starts an OTLP gRPC trace collector server that +// serves on the provided listener. +// +// The server is stopped via t.Cleanup when the test ends, and any unexpected +// serve error is reported as a test failure. +func startGRPCTraceCollector(t *testing.T, listener net.Listener, serverOptions []grpc.ServerOption) { + srv := grpc.NewServer(serverOptions...)
+ c := &grpcTraceCollector{} + + v1.RegisterTraceServiceServer(srv, c) + + errCh := make(chan error, 1) + go func() { errCh <- srv.Serve(listener) }() + + t.Cleanup(func() { + srv.GracefulStop() + if err := <-errCh; err != nil && !errors.Is(err, grpc.ErrServerStopped) { + assert.NoError(t, err) + } + }) +} + +// Export handles the export req. +func (*grpcTraceCollector) Export( + _ context.Context, + _ *v1.ExportTraceServiceRequest, +) (*v1.ExportTraceServiceResponse, error) { + return &v1.ExportTraceServiceResponse{}, nil +} From 1c1179a4e5bba8677fc92f9183666e3038c7a55d Mon Sep 17 00:00:00 2001 From: alex boten <223565+codeboten@users.noreply.github.com> Date: Mon, 9 Feb 2026 13:38:05 -0800 Subject: [PATCH 2/5] add script to automatically remove experimental features Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- Makefile | 13 +++-- otelconf/remove_experimental_patch.sed | 69 ++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 4 deletions(-) create mode 100644 otelconf/remove_experimental_patch.sed diff --git a/Makefile b/Makefile index 7646a64b851..f2cd7518598 100644 --- a/Makefile +++ b/Makefile @@ -326,7 +326,8 @@ OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR=tmp/opentelemetry-configuration genjsonschema-cleanup: rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} -GENERATED_CONFIG=./otelconf/x/generated_config.go +GENERATED_EXPERIMENTAL_CONFIG=./otelconf/x/generated_config.go +GENERATED_STABLE_CONFIG=./otelconf/generated_config.go # Generate structs for configuration from opentelemetry-configuration schema genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) @@ -338,11 +339,15 @@ genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) --struct-name-from-title \ --package x \ --only-models \ - --output ${GENERATED_CONFIG} \ + --output ${GENERATED_EXPERIMENTAL_CONFIG} \ ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/opentelemetry_configuration.json @echo Modify jsonschema generated files. 
- sed -f ./otelconf/jsonschema_patch.sed ${GENERATED_CONFIG} > ${GENERATED_CONFIG}.tmp - mv ${GENERATED_CONFIG}.tmp ${GENERATED_CONFIG} + sed -f ./otelconf/jsonschema_patch.sed ${GENERATED_EXPERIMENTAL_CONFIG} > ${GENERATED_EXPERIMENTAL_CONFIG}.tmp + cp ${GENERATED_EXPERIMENTAL_CONFIG}.tmp ${GENERATED_EXPERIMENTAL_CONFIG} + sed -f ./otelconf/remove_experimental_patch.sed ${GENERATED_EXPERIMENTAL_CONFIG}.tmp > ${GENERATED_STABLE_CONFIG}.tmp + rm ${GENERATED_EXPERIMENTAL_CONFIG}.tmp + mv ${GENERATED_STABLE_CONFIG}.tmp ${GENERATED_STABLE_CONFIG} + $(GO) fmt ${GENERATED_STABLE_CONFIG} $(MAKE) genjsonschema-cleanup .PHONY: codespell diff --git a/otelconf/remove_experimental_patch.sed b/otelconf/remove_experimental_patch.sed new file mode 100644 index 00000000000..ff933d0727f --- /dev/null +++ b/otelconf/remove_experimental_patch.sed @@ -0,0 +1,69 @@ +#!/bin/sed -f + +# Rename package +s+^package x+package otelconf+g + +# Remove experimental const definitions +/^const Experimental/d + +# Remove single-line experimental type definitions (aliases to basic types) +/^type Experimental.*\*int$/d +/^type Experimental.*\*float64$/d +/^type Experimental.*\*string$/d +/^type Experimental.*\*bool$/d +/^type Experimental.*string$/d +/^type Experimental.*map\[string\]interface{}$/d +/^type Experimental.*map\[string\]map\[string\]interface{}$/d + +# Remove array type definitions +/^type Experimental.*\[\]Experimental/d + +# Remove multi-line experimental struct type definitions using address range +/^type Experimental.*struct {$/,/^}$/d + +# Remove struct fields that reference Experimental types (including Development suffix fields) +/^ .*Development.*\*Experimental/d +/^ [A-Z][A-Za-z0-9_]* \*Experimental/d +/^ [A-Z][A-Za-z0-9_]* Experimental[A-Z]/d +/^ [A-Z][A-Za-z0-9_]* \[\]Experimental/d + +# Remove comment lines that reference experimental types +/^[[:space:]]*\/\/ .*Experimental/d +/^[[:space:]]*\/\/ A rule for Experimental/d + +# Remove comment blocks before experimental 
Development fields +# Pattern: multi-line comment block ending with empty comment line, followed by Development field +/^ \/\/ Configure exporter to be OTLP with file transport\.$/,/^ \/\/$/d +/^ \/\/ Configure loggers\.$/,/^ \/\/$/d +/^ \/\/ Configure meters\.$/,/^ \/\/$/d +/^ \/\/ Configure instrumentation\.$/,/^ \/\/$/d +/^ \/\/ Configure exporter to be prometheus\.$/,/^ \/\/$/d +/^ \/\/ Configure resource detection\.$/,/^ \/\/$/d +/^ \/\/ Configure sampler to be composite\.$/,/^ \/\/$/d +/^ \/\/ Configure sampler to be jaeger_remote\.$/,/^ \/\/$/d +/^ \/\/ Configure sampler to be probability\.$/,/^ \/\/$/d +/^ \/\/ Configure tracers\.$/,/^ \/\/$/d + +# Remove orphaned top-level comment blocks (from deleted type definitions) +# Probability sampler ratio comments +/^\/\/ Configure ratio\.$/,/^$/d + +# Rule-based sampler comments +/^\/\/ match conditions - the sampler will be applied/,/^$/d +/^\/\/ The rules for the sampler, matched in order\./,/^$/d + +# Jaeger remote sampler comments +/^\/\/ Configure the polling interval.*to fetch from the remote$/,/^$/d + +# Logger config comments +/^\/\/ Configure if the logger is enabled or not\.$/,/^$/d +/^\/\/ Configure trace based filtering\.$/,/^$/d + +# OTLP file exporter comments +/^\/\/ Configure output stream\.$/,/^$/d + +# Prometheus exporter comments +/^\/\/ Configure host\.$/,/^$/d +/^\/\/ Configure port\.$/,/^$/d +/^\/\/ Configure Prometheus Exporter to produce metrics without a scope info metric\.$/,/^$/d +/^\/\/ Configure Prometheus Exporter to produce metrics without a target info metric$/,/^$/d From 561b4d7ed9868d85710308d0cf21ba34c480fee5 Mon Sep 17 00:00:00 2001 From: alex boten <223565+codeboten@users.noreply.github.com> Date: Mon, 9 Feb 2026 14:14:39 -0800 Subject: [PATCH 3/5] fix tests Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- otelconf/x/fuzz_test.go | 4 ++-- otelconf/x/metric_test.go | 32 ++++++++++++++++---------------- otelconf/x/trace_test.go | 30 
+++++++++++++++--------------- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/otelconf/x/fuzz_test.go b/otelconf/x/fuzz_test.go index 7852ba91130..b1a55317e4d 100644 --- a/otelconf/x/fuzz_test.go +++ b/otelconf/x/fuzz_test.go @@ -15,7 +15,7 @@ import ( ) func FuzzJSON(f *testing.F) { - b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.json")) + b, err := os.ReadFile(filepath.Join("..", "testdata", "v1.0.0.json")) require.NoError(f, err) f.Add(b) @@ -40,7 +40,7 @@ func FuzzJSON(f *testing.F) { } func FuzzYAML(f *testing.F) { - b, err := os.ReadFile(filepath.Join("testdata", "v1.0.0.yaml")) + b, err := os.ReadFile(filepath.Join("..", "testdata", "v1.0.0.yaml")) require.NoError(f, err) f.Add(b) diff --git a/otelconf/x/metric_test.go b/otelconf/x/metric_test.go index 80b33266375..777d90b44e4 100644 --- a/otelconf/x/metric_test.go +++ b/otelconf/x/metric_test.go @@ -293,7 +293,7 @@ func TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - CaFile: ptr(filepath.Join("testdata", "ca.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), }, }, }, @@ -311,7 +311,7 @@ func TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -329,8 +329,8 @@ func TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), - CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -565,7 +565,7 @@ func TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - CaFile: ptr(filepath.Join("testdata", "ca.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), }, }, }, @@ -583,7 +583,7 @@ func 
TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -601,8 +601,8 @@ func TestReader(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), - CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -870,7 +870,7 @@ func TestReader(t *testing.T) { switch reflect.TypeOf(tt.wantReader).String() { case "*metric.PeriodicReader": fieldName = "exporter" - case "otelconf.readerWithServer": + case "x.readerWithServer": fieldName = "Reader" default: fieldName = "e" @@ -1479,7 +1479,7 @@ func Test_otlpGRPCMetricExporter(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(5000), Tls: &GrpcTls{ - CaFile: ptr("testdata/server-certs/server.crt"), + CaFile: ptr("../testdata/server-certs/server.crt"), }, Headers: []NameStringValuePair{ {Name: "test", Value: ptr("test1")}, @@ -1488,7 +1488,7 @@ func Test_otlpGRPCMetricExporter(t *testing.T) { }, grpcServerOpts: func() ([]grpc.ServerOption, error) { opts := []grpc.ServerOption{} - tlsCreds, err := credentials.NewServerTLSFromFile("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + tlsCreds, err := credentials.NewServerTLSFromFile("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") if err != nil { return nil, err } @@ -1504,9 +1504,9 @@ func Test_otlpGRPCMetricExporter(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(5000), Tls: &GrpcTls{ - CaFile: ptr("testdata/server-certs/server.crt"), - KeyFile: ptr("testdata/client-certs/client.key"), - CertFile: ptr("testdata/client-certs/client.crt"), + CaFile: ptr("../testdata/server-certs/server.crt"), + KeyFile: 
ptr("../testdata/client-certs/client.key"), + CertFile: ptr("../testdata/client-certs/client.crt"), }, Headers: []NameStringValuePair{ {Name: "test", Value: ptr("test1")}, @@ -1515,11 +1515,11 @@ func Test_otlpGRPCMetricExporter(t *testing.T) { }, grpcServerOpts: func() ([]grpc.ServerOption, error) { opts := []grpc.ServerOption{} - cert, err := tls.LoadX509KeyPair("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + cert, err := tls.LoadX509KeyPair("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") if err != nil { return nil, err } - caCert, err := os.ReadFile("testdata/ca.crt") + caCert, err := os.ReadFile("../testdata/ca.crt") if err != nil { return nil, err } diff --git a/otelconf/x/trace_test.go b/otelconf/x/trace_test.go index d293e0e5409..b7adc377e88 100644 --- a/otelconf/x/trace_test.go +++ b/otelconf/x/trace_test.go @@ -359,7 +359,7 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - CaFile: ptr(filepath.Join("testdata", "ca.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), }, }, }, @@ -377,7 +377,7 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -395,8 +395,8 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &GrpcTls{ - KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), - CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -518,7 +518,7 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - CaFile: ptr(filepath.Join("testdata", "ca.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "ca.crt")), }, }, }, @@ 
-536,7 +536,7 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - CaFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + CaFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -554,8 +554,8 @@ func TestSpanProcessor(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(1000), Tls: &HttpTls{ - KeyFile: ptr(filepath.Join("testdata", "bad_cert.crt")), - CertFile: ptr(filepath.Join("testdata", "bad_cert.crt")), + KeyFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), + CertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), }, }, }, @@ -936,7 +936,7 @@ func Test_otlpGRPCTraceExporter(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(5000), Tls: &GrpcTls{ - CaFile: ptr("testdata/server-certs/server.crt"), + CaFile: ptr("../testdata/server-certs/server.crt"), }, Headers: []NameStringValuePair{ {Name: "test", Value: ptr("test1")}, @@ -945,7 +945,7 @@ func Test_otlpGRPCTraceExporter(t *testing.T) { }, grpcServerOpts: func() ([]grpc.ServerOption, error) { opts := []grpc.ServerOption{} - tlsCreds, err := credentials.NewServerTLSFromFile("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + tlsCreds, err := credentials.NewServerTLSFromFile("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") if err != nil { return nil, err } @@ -961,9 +961,9 @@ func Test_otlpGRPCTraceExporter(t *testing.T) { Compression: ptr("gzip"), Timeout: ptr(5000), Tls: &GrpcTls{ - CaFile: ptr("testdata/server-certs/server.crt"), - KeyFile: ptr("testdata/client-certs/client.key"), - CertFile: ptr("testdata/client-certs/client.crt"), + CaFile: ptr("../testdata/server-certs/server.crt"), + KeyFile: ptr("../testdata/client-certs/client.key"), + CertFile: ptr("../testdata/client-certs/client.crt"), }, Headers: []NameStringValuePair{ {Name: "test", Value: ptr("test1")}, @@ -972,11 +972,11 @@ func Test_otlpGRPCTraceExporter(t *testing.T) { }, grpcServerOpts: func() 
([]grpc.ServerOption, error) { opts := []grpc.ServerOption{} - cert, err := tls.LoadX509KeyPair("testdata/server-certs/server.crt", "testdata/server-certs/server.key") + cert, err := tls.LoadX509KeyPair("../testdata/server-certs/server.crt", "../testdata/server-certs/server.key") if err != nil { return nil, err } - caCert, err := os.ReadFile("testdata/ca.crt") + caCert, err := os.ReadFile("../testdata/ca.crt") if err != nil { return nil, err } From badc46060aa0026481926d99bbcbb7c0ddb33a7e Mon Sep 17 00:00:00 2001 From: alex boten <223565+codeboten@users.noreply.github.com> Date: Wed, 11 Feb 2026 14:27:28 -0800 Subject: [PATCH 4/5] changelog Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dadd92c7bf7..1a355ed877a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Changed - Updated the configuration schema used in `go.opentelemetry.io/contrib/otelconf` to [rc.3](https://github.com/open-telemetry/opentelemetry-configuration/releases/tag/v1.0.0-rc.3). (#8505) +- Moved experimental types from `go.opentelemetry.io/contrib/otelconf` to `go.opentelemetry.io/contrib/otelconf/x`. 
(#8529) From 426a0bdf26a6eec3523d90e0263fa1d4d9875a35 Mon Sep 17 00:00:00 2001 From: alex boten <223565+codeboten@users.noreply.github.com> Date: Tue, 24 Feb 2026 08:10:59 -0800 Subject: [PATCH 5/5] fix lint Signed-off-by: alex boten <223565+codeboten@users.noreply.github.com> --- otelconf/metric.go | 15 +-------------- otelconf/x/config.go | 2 +- otelconf/x/doc.go | 2 +- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/otelconf/metric.go b/otelconf/metric.go index edeb2d7378c..8e4c6652dca 100644 --- a/otelconf/metric.go +++ b/otelconf/metric.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "math" - "net/http" "net/url" "os" "time" @@ -88,7 +87,7 @@ func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) return nil, newErrInvalid("no valid metric reader") } -func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { +func pullReader(_ context.Context, _ PullMetricExporter) (sdkmetric.Reader, error) { return nil, newErrInvalid("no valid metric exporter") } @@ -328,18 +327,6 @@ func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { }, nil } -type readerWithServer struct { - sdkmetric.Reader - server *http.Server -} - -func (rws readerWithServer) Shutdown(ctx context.Context) error { - return errors.Join( - rws.Reader.Shutdown(ctx), - rws.server.Shutdown(ctx), - ) -} - func view(v View) (sdkmetric.View, error) { inst, err := instrument(v.Selector) if err != nil { diff --git a/otelconf/x/config.go b/otelconf/x/config.go index 18ab2aaa290..689856c4c1d 100644 --- a/otelconf/x/config.go +++ b/otelconf/x/config.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package otelconf provides an OpenTelemetry declarative configuration SDK. +// Package x provides an OpenTelemetry declarative configuration SDK. 
package x // import "go.opentelemetry.io/contrib/otelconf/x" import ( diff --git a/otelconf/x/doc.go b/otelconf/x/doc.go index ea077bc31e4..baaa60ce996 100644 --- a/otelconf/x/doc.go +++ b/otelconf/x/doc.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package otelconf can be used to parse a configuration file that follows +// Package x can be used to parse a configuration file that follows // the JSON Schema defined by the OpenTelemetry Configuration schema. Different // versions of the schema are supported by the code in the directory that // matches the version number of the schema. For example, the import