diff --git a/.chloggen/metricsaslogsconnector.yaml b/.chloggen/metricsaslogsconnector.yaml new file mode 100644 index 0000000000000..92f70070fc02b --- /dev/null +++ b/.chloggen/metricsaslogsconnector.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: connector/metricsaslogs + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add connector to convert metrics to logs + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [40938] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/.codecov.yml b/.codecov.yml index 91a0bfd7cf522..fc75df05c7627 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -64,6 +64,10 @@ component_management: name: connector_grafanacloud paths: - connector/grafanacloudconnector/** + - component_id: connector_metricsaslogs + name: connector_metricsaslogs + paths: + - connector/metricsaslogsconnector/** - component_id: connector_otlpjson name: connector_otlpjson paths: diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index c6623cecacef4..6bd82a1a8f7be 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,6 +33,7 @@ connector/datadogconnector/ @open-telemetry connector/exceptionsconnector/ @open-telemetry/collector-contrib-approvers @marctc connector/failoverconnector/ @open-telemetry/collector-contrib-approvers @akats7 @fatsheep9146 connector/grafanacloudconnector/ @open-telemetry/collector-contrib-approvers @rlankfo @jcreixell +connector/metricsaslogsconnector/ @open-telemetry/collector-contrib-approvers @atoulme connector/otlpjsonconnector/ @open-telemetry/collector-contrib-approvers @ChrsMark connector/roundrobinconnector/ @open-telemetry/collector-contrib-approvers @bogdandrutu connector/routingconnector/ @open-telemetry/collector-contrib-approvers @mwear @TylerHelmuth @evan-bradley @edmocosta diff --git a/.github/ISSUE_TEMPLATE/beta_stability.yaml b/.github/ISSUE_TEMPLATE/beta_stability.yaml index 0fcac1a031cd6..ef347ebf0844e 100644 --- a/.github/ISSUE_TEMPLATE/beta_stability.yaml +++ b/.github/ISSUE_TEMPLATE/beta_stability.yaml @@ -30,6 +30,7 @@ body: - connector/exceptions - connector/failover - connector/grafanacloud + - connector/metricsaslogs - connector/otlpjson - connector/roundrobin - connector/routing diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index c6a4169080354..4b4893bc7826f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -33,6 +33,7 @@ 
body: - connector/exceptions - connector/failover - connector/grafanacloud + - connector/metricsaslogs - connector/otlpjson - connector/roundrobin - connector/routing diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 50946f751bffd..a6a8404e975c5 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -27,6 +27,7 @@ body: - connector/exceptions - connector/failover - connector/grafanacloud + - connector/metricsaslogs - connector/otlpjson - connector/roundrobin - connector/routing diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index 2e4a967f2a51c..570f31b7e4f62 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -27,6 +27,7 @@ body: - connector/exceptions - connector/failover - connector/grafanacloud + - connector/metricsaslogs - connector/otlpjson - connector/roundrobin - connector/routing diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index f6727cb344533..2d788af0fd272 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -32,6 +32,7 @@ body: - connector/exceptions - connector/failover - connector/grafanacloud + - connector/metricsaslogs - connector/otlpjson - connector/roundrobin - connector/routing diff --git a/.github/component_labels.txt b/.github/component_labels.txt index 743a8dd331fa4..15d5b6e4277df 100644 --- a/.github/component_labels.txt +++ b/.github/component_labels.txt @@ -14,6 +14,7 @@ connector/datadogconnector connector/datadog connector/exceptionsconnector connector/exceptions connector/failoverconnector connector/failover connector/grafanacloudconnector connector/grafanacloud +connector/metricsaslogsconnector connector/metricsaslogs connector/otlpjsonconnector connector/otlpjson connector/roundrobinconnector connector/roundrobin connector/routingconnector 
connector/routing diff --git a/connector/metricsaslogsconnector/Makefile b/connector/metricsaslogsconnector/Makefile new file mode 100644 index 0000000000000..ded7a36092dc3 --- /dev/null +++ b/connector/metricsaslogsconnector/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/connector/metricsaslogsconnector/README.md b/connector/metricsaslogsconnector/README.md new file mode 100644 index 0000000000000..9a2f6cb7d55cb --- /dev/null +++ b/connector/metricsaslogsconnector/README.md @@ -0,0 +1,150 @@ +# metricsaslogsconnector + + +| Status | | +| ------------- |-----------| +| Distributions | [] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aconnector%2Fmetricsaslogs%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aconnector%2Fmetricsaslogs) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aconnector%2Fmetricsaslogs%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aconnector%2Fmetricsaslogs) | +| Code coverage | [![codecov](https://codecov.io/github/open-telemetry/opentelemetry-collector-contrib/graph/main/badge.svg?component=connector_metricsaslogs)](https://app.codecov.io/gh/open-telemetry/opentelemetry-collector-contrib/tree/main/?components%5B0%5D=connector_metricsaslogs&displayType=list) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@atoulme](https://www.github.com/atoulme) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development + +## Supported Pipeline Types + +| [Exporter 
Pipeline Type] | [Receiver Pipeline Type] | [Stability Level] | + | ------------------------ | ------------------------ | ----------------- | + | metrics | logs | [development] | + + [Exporter Pipeline Type]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md#exporter-pipeline-type + [Receiver Pipeline Type]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md#receiver-pipeline-type + [Stability Level]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stability-levels + + + This connector converts OpenTelemetry metrics into logs, creating one log entry per metric data point. Each metric data point is transformed into a structured log record that carries the metric's metadata and values as log attributes. + + ## Current Limitations + + ⚠️ **Current implementation discards the following metric features:** + - Metric exemplars + - Advanced metadata + + These features may be added in future iterations. + + ## Configuration + + The following settings can be optionally configured: + + - `include_resource_attributes` (default = `true`): Whether to include resource attributes in the generated logs + - `include_scope_info` (default = `true`): Whether to include instrumentation scope information in the generated logs + + ## Log Body Format + + The connector sets a fixed log body of `metric converted to log` on every record; the metric name, type, and values are recorded as log attributes (`metric.name`, `metric.type`, `gauge.value`, `sum.value`, `histogram.*`, and so on) rather than encoded into the body. + + ## Example Usage + + ### Basic Configuration + + ```yaml + connectors: + metricsaslogs: + + service: + pipelines: + logs: + receivers: [metricsaslogs] + processors: [] + exporters: [debug] + metrics: + receivers: [otlp] + processors: [] + exporters: [metricsaslogs] + ``` + + ### Advanced Configuration + + ```yaml + connectors: + metricsaslogs: + include_resource_attributes: false + 
include_scope_info: false + ``` + + + ### Example Metric Conversions + + For a gauge metric `cpu_usage` with value `85.2`: + ```json + { + "body": "metric converted to log", + "attributes": { + "metric.name": "cpu_usage", + "metric.type": "Gauge", + "metric.description": "CPU usage percentage", + "metric.unit": "%", + "gauge.value": 85.2 + } + } + ``` + + For a histogram metric `request_duration`: + ```json + { + "body": "metric converted to log", + "attributes": { + "metric.name": "request_duration", + "metric.type": "Histogram", + "metric.description": "Request duration in seconds", + "metric.unit": "s", + "metric.aggregation_temporality": "Delta", + "histogram.count": 100, + "histogram.sum": 1.5, + "histogram.bucket_counts": [10, 50, 40], + "histogram.explicit_bounds": [0.1, 0.5, 1.0] + } + } + ``` + + ## Output Structure + + Each metric data point is converted to a log record with: + + - **Body**: Fixed string: `metric converted to log` + - **Timestamp**: Metric data point timestamp + - **Observed Timestamp**: Metric data point start timestamp (if available) + - **Attributes**: + - Original metric data point attributes (labels) + - `metric.name`: The metric name + - `metric.type`: The metric type (Gauge, Sum, Histogram, etc.) 
+ - `metric.description`: Metric description (if available) + - `metric.unit`: Metric unit (if available) + - Additional type-specific attributes: + - For Sum metrics: `metric.is_monotonic`, `metric.aggregation_temporality` + - For Histogram/ExponentialHistogram: `metric.aggregation_temporality` + - Resource attributes (if `include_resource_attributes` is true) + - Instrumentation scope information (if `include_scope_info` is true) + + ## Supported Metric Types + + All OpenTelemetry metric types are supported: + + - **Gauge**: Point-in-time measurements + - **Sum**: Cumulative or delta measurements + - **Histogram**: Distribution of measurements with buckets + - **Exponential Histogram**: Distribution with exponentially sized buckets + - **Summary**: Distribution with quantile values + + ## Value Encoding + + Metric values are recorded as typed log attributes rather than inside the log body: + + - For simple metrics (Gauge, Sum): `gauge.value` / `sum.value`, stored as an integer or double matching the data point's value type + - For complex metrics (Histogram, Exponential Histogram, Summary): per-field attributes such as `histogram.count`, `histogram.sum`, `histogram.bucket_counts`, `exponential_histogram.scale`, and `summary.quantile_values` 
diff --git a/connector/metricsaslogsconnector/config.go b/connector/metricsaslogsconnector/config.go new file mode 100644 index 0000000000000..77d9701f5fed2 --- /dev/null +++ b/connector/metricsaslogsconnector/config.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricsaslogsconnector // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector" + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct { + IncludeResourceAttributes bool `mapstructure:"include_resource_attributes"` + + IncludeScopeInfo bool `mapstructure:"include_scope_info"` + + _ struct{} +} + +func (*Config) Validate() error { + return nil +} + +func createDefaultConfig() component.Config { + return &Config{ + IncludeResourceAttributes: true, + IncludeScopeInfo: true, + } +} diff --git a/connector/metricsaslogsconnector/connector.go b/connector/metricsaslogsconnector/connector.go new file mode 100644 index 0000000000000..cb9d7941af90b --- /dev/null +++ b/connector/metricsaslogsconnector/connector.go @@ -0,0 +1,289 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricsaslogsconnector // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" +) + +const ( + attrMetricName = "metric.name" + attrMetricType = "metric.type" + attrMetricDescription = "metric.description" + attrMetricUnit = "metric.unit" + attrMetricIsMonotonic = "metric.is_monotonic" + attrMetricAggregationTemporality = "metric.aggregation_temporality" + + attrGaugeValue = "gauge.value" + attrSumValue = "sum.value" + + attrHistogramCount = 
"histogram.count" + attrHistogramSum = "histogram.sum" + attrHistogramMin = "histogram.min" + attrHistogramMax = "histogram.max" + attrHistogramBucketCounts = "histogram.bucket_counts" + attrHistogramExplicitBounds = "histogram.explicit_bounds" + + attrExponentialHistogramCount = "exponential_histogram.count" + attrExponentialHistogramSum = "exponential_histogram.sum" + attrExponentialHistogramScale = "exponential_histogram.scale" + attrExponentialHistogramZeroCount = "exponential_histogram.zero_count" + attrExponentialHistogramMin = "exponential_histogram.min" + attrExponentialHistogramMax = "exponential_histogram.max" + + attrSummaryCount = "summary.count" + attrSummarySum = "summary.sum" + attrSummaryQuantileValues = "summary.quantile_values" + attrQuantile = "quantile" + attrValue = "value" +) + +type metricsAsLogs struct { + logsConsumer consumer.Logs + config *Config + logger *zap.Logger + component.StartFunc + component.ShutdownFunc +} + +func (*metricsAsLogs) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +func (m *metricsAsLogs) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + logs := plog.NewLogs() + + for i := 0; i < md.ResourceMetrics().Len(); i++ { + resourceMetric := md.ResourceMetrics().At(i) + m.processResourceMetrics(resourceMetric, logs) + } + + if logs.ResourceLogs().Len() > 0 { + return m.logsConsumer.ConsumeLogs(ctx, logs) + } + + return nil +} + +func (m *metricsAsLogs) processResourceMetrics(resourceMetric pmetric.ResourceMetrics, logs plog.Logs) { + resourceLogs := logs.ResourceLogs().AppendEmpty() + + if m.config.IncludeResourceAttributes { + resourceMetric.Resource().Attributes().CopyTo(resourceLogs.Resource().Attributes()) + } + + resourceLogs.SetSchemaUrl(resourceMetric.SchemaUrl()) + + for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { + scopeMetric := resourceMetric.ScopeMetrics().At(j) + m.processScopeMetrics(scopeMetric, resourceLogs) + } +} + +func (m 
*metricsAsLogs) processScopeMetrics(scopeMetric pmetric.ScopeMetrics, resourceLogs plog.ResourceLogs) { + scopeLogs := resourceLogs.ScopeLogs().AppendEmpty() + + if m.config.IncludeScopeInfo { + scopeMetric.Scope().CopyTo(scopeLogs.Scope()) + } + + scopeLogs.SetSchemaUrl(scopeMetric.SchemaUrl()) + + for k := 0; k < scopeMetric.Metrics().Len(); k++ { + metric := scopeMetric.Metrics().At(k) + m.processMetric(metric, &scopeLogs) + } +} + +func (m *metricsAsLogs) processMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + switch metric.Type() { + case pmetric.MetricTypeGauge: + m.processGaugeDataPointsWithMetric(metric, scopeLogs) + case pmetric.MetricTypeSum: + m.processSumDataPointsWithMetric(metric, scopeLogs) + case pmetric.MetricTypeHistogram: + m.processHistogramDataPointsWithMetric(metric, scopeLogs) + case pmetric.MetricTypeExponentialHistogram: + m.processExponentialHistogramDataPointsWithMetric(metric, scopeLogs) + case pmetric.MetricTypeSummary: + m.processSummaryDataPointsWithMetric(metric, scopeLogs) + default: + m.logger.Warn("Unknown metric type", zap.String("type", metric.Type().String())) + } +} + +func (m *metricsAsLogs) processGaugeDataPointsWithMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + gauge := metric.Gauge() + for i := 0; i < gauge.DataPoints().Len(); i++ { + dataPoint := gauge.DataPoints().At(i) + m.convertGaugeDataPointToLogRecordWithMetric(metric, dataPoint, scopeLogs) + } +} + +func (m *metricsAsLogs) processSumDataPointsWithMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + sum := metric.Sum() + for i := 0; i < sum.DataPoints().Len(); i++ { + dataPoint := sum.DataPoints().At(i) + m.convertSumDataPointToLogRecordWithMetric(metric, sum, dataPoint, scopeLogs) + } +} + +func (m *metricsAsLogs) processHistogramDataPointsWithMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + histogram := metric.Histogram() + for i := 0; i < histogram.DataPoints().Len(); i++ { + dataPoint := histogram.DataPoints().At(i) 
+ m.convertHistogramDataPointToLogRecordWithMetric(metric, histogram, dataPoint, scopeLogs) + } +} + +func (m *metricsAsLogs) processExponentialHistogramDataPointsWithMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + expHistogram := metric.ExponentialHistogram() + for i := 0; i < expHistogram.DataPoints().Len(); i++ { + dataPoint := expHistogram.DataPoints().At(i) + m.convertExponentialHistogramDataPointToLogRecordWithMetric(metric, expHistogram, dataPoint, scopeLogs) + } +} + +func (m *metricsAsLogs) processSummaryDataPointsWithMetric(metric pmetric.Metric, scopeLogs *plog.ScopeLogs) { + summary := metric.Summary() + for i := 0; i < summary.DataPoints().Len(); i++ { + dataPoint := summary.DataPoints().At(i) + m.convertSummaryDataPointToLogRecordWithMetric(metric, dataPoint, scopeLogs) + } +} + +func (m *metricsAsLogs) convertGaugeDataPointToLogRecordWithMetric(metric pmetric.Metric, dataPoint pmetric.NumberDataPoint, scopeLogs *plog.ScopeLogs) { + logRecord := scopeLogs.LogRecords().AppendEmpty() + + m.setLogRecordFromDataPoint(logRecord, metric.Name(), metric.Type().String(), dataPoint.Attributes(), dataPoint.Timestamp(), dataPoint.StartTimestamp()) + m.addCommonMetricAttributes(logRecord, metric) + m.addNumberDataPointAttributes(logRecord, dataPoint, attrGaugeValue) +} + +func (m *metricsAsLogs) convertSumDataPointToLogRecordWithMetric(metric pmetric.Metric, sum pmetric.Sum, dataPoint pmetric.NumberDataPoint, scopeLogs *plog.ScopeLogs) { + logRecord := scopeLogs.LogRecords().AppendEmpty() + + m.setLogRecordFromDataPoint(logRecord, metric.Name(), metric.Type().String(), dataPoint.Attributes(), dataPoint.Timestamp(), dataPoint.StartTimestamp()) + m.addCommonMetricAttributes(logRecord, metric) + logRecord.Attributes().PutBool(attrMetricIsMonotonic, sum.IsMonotonic()) + m.addAggregationTemporalityAttribute(logRecord, sum.AggregationTemporality()) + m.addNumberDataPointAttributes(logRecord, dataPoint, attrSumValue) +} + +func (m *metricsAsLogs) 
convertHistogramDataPointToLogRecordWithMetric(metric pmetric.Metric, histogram pmetric.Histogram, dataPoint pmetric.HistogramDataPoint, scopeLogs *plog.ScopeLogs) { + logRecord := scopeLogs.LogRecords().AppendEmpty() + + m.setLogRecordFromDataPoint(logRecord, metric.Name(), metric.Type().String(), dataPoint.Attributes(), dataPoint.Timestamp(), dataPoint.StartTimestamp()) + m.addCommonMetricAttributes(logRecord, metric) + m.addAggregationTemporalityAttribute(logRecord, histogram.AggregationTemporality()) + m.addHistogramDataPointAttributes(logRecord, dataPoint) +} + +func (m *metricsAsLogs) convertExponentialHistogramDataPointToLogRecordWithMetric(metric pmetric.Metric, expHistogram pmetric.ExponentialHistogram, dataPoint pmetric.ExponentialHistogramDataPoint, scopeLogs *plog.ScopeLogs) { + logRecord := scopeLogs.LogRecords().AppendEmpty() + + m.setLogRecordFromDataPoint(logRecord, metric.Name(), metric.Type().String(), dataPoint.Attributes(), dataPoint.Timestamp(), dataPoint.StartTimestamp()) + m.addCommonMetricAttributes(logRecord, metric) + m.addAggregationTemporalityAttribute(logRecord, expHistogram.AggregationTemporality()) + m.addExponentialHistogramDataPointAttributes(logRecord, dataPoint) +} + +func (m *metricsAsLogs) convertSummaryDataPointToLogRecordWithMetric(metric pmetric.Metric, dataPoint pmetric.SummaryDataPoint, scopeLogs *plog.ScopeLogs) { + logRecord := scopeLogs.LogRecords().AppendEmpty() + + m.setLogRecordFromDataPoint(logRecord, metric.Name(), metric.Type().String(), dataPoint.Attributes(), dataPoint.Timestamp(), dataPoint.StartTimestamp()) + m.addCommonMetricAttributes(logRecord, metric) + m.addSummaryDataPointAttributes(logRecord, dataPoint) +} + +func (*metricsAsLogs) addAggregationTemporalityAttribute(logRecord plog.LogRecord, temporality pmetric.AggregationTemporality) { + logRecord.Attributes().PutStr(attrMetricAggregationTemporality, temporality.String()) +} + +func (*metricsAsLogs) addCommonMetricAttributes(logRecord plog.LogRecord, 
metric pmetric.Metric) { + logRecord.Attributes().PutStr(attrMetricDescription, metric.Description()) + logRecord.Attributes().PutStr(attrMetricUnit, metric.Unit()) +} + +func (*metricsAsLogs) addNumberDataPointAttributes(logRecord plog.LogRecord, dataPoint pmetric.NumberDataPoint, valueAttr string) { + switch dataPoint.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + logRecord.Attributes().PutInt(valueAttr, dataPoint.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + logRecord.Attributes().PutDouble(valueAttr, dataPoint.DoubleValue()) + } +} + +func (*metricsAsLogs) addHistogramDataPointAttributes(logRecord plog.LogRecord, dataPoint pmetric.HistogramDataPoint) { + logRecord.Attributes().PutInt(attrHistogramCount, int64(dataPoint.Count())) + logRecord.Attributes().PutDouble(attrHistogramSum, dataPoint.Sum()) + + if dataPoint.HasMin() { + logRecord.Attributes().PutDouble(attrHistogramMin, dataPoint.Min()) + } + if dataPoint.HasMax() { + logRecord.Attributes().PutDouble(attrHistogramMax, dataPoint.Max()) + } + + bucketCountsSlice := logRecord.Attributes().PutEmptySlice(attrHistogramBucketCounts) + for i := 0; i < dataPoint.BucketCounts().Len(); i++ { + bucketCountsSlice.AppendEmpty().SetInt(int64(dataPoint.BucketCounts().At(i))) + } + + explicitBoundsSlice := logRecord.Attributes().PutEmptySlice(attrHistogramExplicitBounds) + for i := 0; i < dataPoint.ExplicitBounds().Len(); i++ { + explicitBoundsSlice.AppendEmpty().SetDouble(dataPoint.ExplicitBounds().At(i)) + } +} + +func (*metricsAsLogs) addExponentialHistogramDataPointAttributes(logRecord plog.LogRecord, dataPoint pmetric.ExponentialHistogramDataPoint) { + logRecord.Attributes().PutInt(attrExponentialHistogramCount, int64(dataPoint.Count())) + logRecord.Attributes().PutDouble(attrExponentialHistogramSum, dataPoint.Sum()) + logRecord.Attributes().PutInt(attrExponentialHistogramScale, int64(dataPoint.Scale())) + logRecord.Attributes().PutInt(attrExponentialHistogramZeroCount, 
int64(dataPoint.ZeroCount())) + + if dataPoint.HasMin() { + logRecord.Attributes().PutDouble(attrExponentialHistogramMin, dataPoint.Min()) + } + if dataPoint.HasMax() { + logRecord.Attributes().PutDouble(attrExponentialHistogramMax, dataPoint.Max()) + } +} + +func (*metricsAsLogs) addSummaryDataPointAttributes(logRecord plog.LogRecord, dataPoint pmetric.SummaryDataPoint) { + logRecord.Attributes().PutInt(attrSummaryCount, int64(dataPoint.Count())) + logRecord.Attributes().PutDouble(attrSummarySum, dataPoint.Sum()) + + if dataPoint.QuantileValues().Len() > 0 { + quantilesSlice := logRecord.Attributes().PutEmptySlice(attrSummaryQuantileValues) + for i := 0; i < dataPoint.QuantileValues().Len(); i++ { + qv := dataPoint.QuantileValues().At(i) + quantileMap := quantilesSlice.AppendEmpty().SetEmptyMap() + quantileMap.PutDouble(attrQuantile, qv.Quantile()) + quantileMap.PutDouble(attrValue, qv.Value()) + } + } +} + +func (*metricsAsLogs) setLogRecordFromDataPoint(logRecord plog.LogRecord, metricName, metricType string, attributes pcommon.Map, timestamp, startTimestamp pcommon.Timestamp) { + logRecord.SetTimestamp(timestamp) + if startTimestamp != 0 { + logRecord.SetObservedTimestamp(startTimestamp) + } + + logRecord.Body().SetStr("metric converted to log") + + // Copy datapoint attributes first, before adding metric-specific attributes + attributes.CopyTo(logRecord.Attributes()) + + logRecord.Attributes().PutStr(attrMetricName, metricName) + logRecord.Attributes().PutStr(attrMetricType, metricType) +} diff --git a/connector/metricsaslogsconnector/connector_test.go b/connector/metricsaslogsconnector/connector_test.go new file mode 100644 index 0000000000000..a9d8ca0e5e1a8 --- /dev/null +++ b/connector/metricsaslogsconnector/connector_test.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricsaslogsconnector + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestCapabilities(t *testing.T) { + connector := &metricsAsLogs{} + capabilities := connector.Capabilities() + assert.False(t, capabilities.MutatesData) +} + +func TestConsumeMetrics_EmptyMetrics(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: zap.NewNop(), + } + + metrics := pmetric.NewMetrics() + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + assert.Empty(t, sink.AllLogs()) +} + +func TestConsumeMetrics_GaugeMetric(t *testing.T) { + tests := []struct { + name string + config *Config + setupMetric func(pmetric.Metric) + expectedAttrs map[string]any + checkResource bool + checkScope bool + }{ + { + name: "int_gauge_with_resource_attributes", + config: &Config{ + IncludeResourceAttributes: true, + IncludeScopeInfo: false, + }, + setupMetric: func(metric pmetric.Metric) { + metric.SetName("test_gauge") + metric.SetDescription("Test gauge metric") + metric.SetUnit("bytes") + gauge := metric.SetEmptyGauge() + dp := gauge.DataPoints().AppendEmpty() + dp.SetIntValue(42) + dp.Attributes().PutStr("label1", "value1") + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567800, 0))) + }, + expectedAttrs: map[string]any{ + attrMetricName: "test_gauge", + attrMetricType: "Gauge", + attrMetricDescription: "Test gauge metric", + attrMetricUnit: "bytes", + attrGaugeValue: int64(42), + "label1": "value1", + }, + checkResource: true, + }, + { + name: "double_gauge_with_scope_info", + config: &Config{ + IncludeResourceAttributes: false, + IncludeScopeInfo: true, + }, + setupMetric: 
func(metric pmetric.Metric) { + metric.SetName("test_gauge_double") + gauge := metric.SetEmptyGauge() + dp := gauge.DataPoints().AppendEmpty() + dp.SetDoubleValue(3.14) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + }, + expectedAttrs: map[string]any{ + attrMetricName: "test_gauge_double", + attrMetricType: "Gauge", + attrGaugeValue: 3.14, + }, + checkScope: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: tt.config, + logger: zap.NewNop(), + } + + metrics := createTestMetrics(tt.setupMetric, tt.checkResource, tt.checkScope) + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + validateLogRecord(t, logs, tt.expectedAttrs, tt.checkResource, tt.checkScope) + }) + } +} + +func TestConsumeMetrics_SumMetric(t *testing.T) { + tests := []struct { + name string + isMonotonic bool + temporality pmetric.AggregationTemporality + expectedAttrs map[string]any + }{ + { + name: "monotonic_cumulative_sum", + isMonotonic: true, + temporality: pmetric.AggregationTemporalityCumulative, + expectedAttrs: map[string]any{ + attrMetricName: "test_sum", + attrMetricType: "Sum", + attrSumValue: int64(100), + attrMetricIsMonotonic: true, + attrMetricAggregationTemporality: "Cumulative", + }, + }, + { + name: "non_monotonic_delta_sum", + isMonotonic: false, + temporality: pmetric.AggregationTemporalityDelta, + expectedAttrs: map[string]any{ + attrMetricName: "test_sum", + attrMetricType: "Sum", + attrSumValue: int64(100), + attrMetricIsMonotonic: false, + attrMetricAggregationTemporality: "Delta", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: zap.NewNop(), + } + + setupMetric := 
func(metric pmetric.Metric) { + metric.SetName("test_sum") + sum := metric.SetEmptySum() + sum.SetIsMonotonic(tt.isMonotonic) + sum.SetAggregationTemporality(tt.temporality) + dp := sum.DataPoints().AppendEmpty() + dp.SetIntValue(100) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + } + + metrics := createTestMetrics(setupMetric, false, false) + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + validateLogRecord(t, logs, tt.expectedAttrs, false, false) + }) + } +} + +func TestConsumeMetrics_HistogramMetric(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: zap.NewNop(), + } + + setupMetric := func(metric pmetric.Metric) { + metric.SetName("test_histogram") + histogram := metric.SetEmptyHistogram() + histogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + dp := histogram.DataPoints().AppendEmpty() + dp.SetCount(10) + dp.SetSum(55.5) + dp.SetMin(1.0) + dp.SetMax(10.0) + dp.BucketCounts().FromRaw([]uint64{1, 2, 3, 4}) + dp.ExplicitBounds().FromRaw([]float64{1.0, 5.0, 10.0}) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + } + + metrics := createTestMetrics(setupMetric, false, false) + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + lr := getLogRecord(t, logs) + + assert.Equal(t, "test_histogram", lr.Attributes().AsRaw()[attrMetricName]) + assert.Equal(t, "Histogram", lr.Attributes().AsRaw()[attrMetricType]) + assert.Equal(t, int64(10), lr.Attributes().AsRaw()[attrHistogramCount]) + assert.Equal(t, 55.5, lr.Attributes().AsRaw()[attrHistogramSum]) + assert.Equal(t, 1.0, lr.Attributes().AsRaw()[attrHistogramMin]) + assert.Equal(t, 10.0, lr.Attributes().AsRaw()[attrHistogramMax]) + assert.Equal(t, "Cumulative", 
lr.Attributes().AsRaw()[attrMetricAggregationTemporality]) + + // Check bucket counts slice + bucketCountsAttr, exists := lr.Attributes().Get(attrHistogramBucketCounts) + require.True(t, exists) + bucketCountsSlice := bucketCountsAttr.Slice() + require.Equal(t, 4, bucketCountsSlice.Len()) + assert.Equal(t, int64(1), bucketCountsSlice.At(0).Int()) + assert.Equal(t, int64(2), bucketCountsSlice.At(1).Int()) + assert.Equal(t, int64(3), bucketCountsSlice.At(2).Int()) + assert.Equal(t, int64(4), bucketCountsSlice.At(3).Int()) + + // Check explicit bounds slice + boundsAttr, exists := lr.Attributes().Get(attrHistogramExplicitBounds) + require.True(t, exists) + boundsSlice := boundsAttr.Slice() + require.Equal(t, 3, boundsSlice.Len()) + assert.Equal(t, 1.0, boundsSlice.At(0).Double()) + assert.Equal(t, 5.0, boundsSlice.At(1).Double()) + assert.Equal(t, 10.0, boundsSlice.At(2).Double()) +} + +func TestConsumeMetrics_ExponentialHistogramMetric(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: zap.NewNop(), + } + + setupMetric := func(metric pmetric.Metric) { + metric.SetName("test_exp_histogram") + expHistogram := metric.SetEmptyExponentialHistogram() + expHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + dp := expHistogram.DataPoints().AppendEmpty() + dp.SetCount(20) + dp.SetSum(100.0) + dp.SetScale(2) + dp.SetZeroCount(1) + dp.SetMin(0.5) + dp.SetMax(50.0) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + } + + metrics := createTestMetrics(setupMetric, false, false) + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + lr := getLogRecord(t, logs) + + expectedAttrs := map[string]any{ + attrMetricName: "test_exp_histogram", + attrMetricType: "ExponentialHistogram", + attrExponentialHistogramCount: int64(20), + attrExponentialHistogramSum: 100.0, + 
attrExponentialHistogramScale: int64(2), + attrExponentialHistogramZeroCount: int64(1), + attrExponentialHistogramMin: 0.5, + attrExponentialHistogramMax: 50.0, + attrMetricAggregationTemporality: "Delta", + } + + for key, expected := range expectedAttrs { + actual := lr.Attributes().AsRaw()[key] + assert.Equal(t, expected, actual, "attribute %s mismatch", key) + } +} + +func TestConsumeMetrics_SummaryMetric(t *testing.T) { + sink := &consumertest.LogsSink{} + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: zap.NewNop(), + } + + setupMetric := func(metric pmetric.Metric) { + metric.SetName("test_summary") + summary := metric.SetEmptySummary() + dp := summary.DataPoints().AppendEmpty() + dp.SetCount(100) + dp.SetSum(500.0) + + // Add quantile values + qv1 := dp.QuantileValues().AppendEmpty() + qv1.SetQuantile(0.5) + qv1.SetValue(10.0) + + qv2 := dp.QuantileValues().AppendEmpty() + qv2.SetQuantile(0.95) + qv2.SetValue(45.0) + + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1234567890, 0))) + } + + metrics := createTestMetrics(setupMetric, false, false) + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + lr := getLogRecord(t, logs) + + assert.Equal(t, "test_summary", lr.Attributes().AsRaw()[attrMetricName]) + assert.Equal(t, "Summary", lr.Attributes().AsRaw()[attrMetricType]) + assert.Equal(t, int64(100), lr.Attributes().AsRaw()[attrSummaryCount]) + assert.Equal(t, 500.0, lr.Attributes().AsRaw()[attrSummarySum]) + + // Check quantile values slice + quantilesAttr, exists := lr.Attributes().Get(attrSummaryQuantileValues) + require.True(t, exists) + quantilesSlice := quantilesAttr.Slice() + require.Equal(t, 2, quantilesSlice.Len()) + + // Check first quantile + q1 := quantilesSlice.At(0).Map() + assert.Equal(t, 0.5, q1.AsRaw()[attrQuantile]) + assert.Equal(t, 10.0, q1.AsRaw()[attrValue]) + + // Check second quantile + q2 := 
quantilesSlice.At(1).Map() + assert.Equal(t, 0.95, q2.AsRaw()[attrQuantile]) + assert.Equal(t, 45.0, q2.AsRaw()[attrValue]) +} + +func TestConsumeMetrics_UnknownMetricType(t *testing.T) { + sink := &consumertest.LogsSink{} + + // Create a custom logger to capture log messages + var logMessages []string + logger := zap.NewExample().WithOptions(zap.Hooks(func(entry zapcore.Entry) error { + logMessages = append(logMessages, entry.Message) + return nil + })) + + connector := &metricsAsLogs{ + logsConsumer: sink, + config: &Config{}, + logger: logger, + } + + // Create metrics with an unknown type (we'll simulate this by not setting any type) + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + sm := rm.ScopeMetrics().AppendEmpty() + metric := sm.Metrics().AppendEmpty() + metric.SetName("unknown_metric") + // Note: Not setting any metric type will result in MetricTypeEmpty + + err := connector.ConsumeMetrics(t.Context(), metrics) + require.NoError(t, err) + + // Should produce resource and scope logs but no log records since unknown metric type is skipped + require.Len(t, sink.AllLogs(), 1) + logs := sink.AllLogs()[0] + + require.Equal(t, 1, logs.ResourceLogs().Len()) + rl := logs.ResourceLogs().At(0) + require.Equal(t, 1, rl.ScopeLogs().Len()) + sl := rl.ScopeLogs().At(0) + + // No log records should be created for unknown metric types + assert.Equal(t, 0, sl.LogRecords().Len()) + + // Should have logged a warning about unknown metric type + assert.Contains(t, logMessages, "Unknown metric type") +} + +// Helper functions + +func createTestMetrics(setupMetric func(pmetric.Metric), withResource, withScope bool) pmetric.Metrics { + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + + if withResource { + rm.Resource().Attributes().PutStr("service.name", "test-service") + rm.Resource().Attributes().PutStr("service.version", "1.0.0") + rm.SetSchemaUrl("https://opentelemetry.io/schemas/1.0.0") + } + + sm := 
rm.ScopeMetrics().AppendEmpty() + + if withScope { + sm.Scope().SetName("test-scope") + sm.Scope().SetVersion("1.0.0") + sm.SetSchemaUrl("https://opentelemetry.io/schemas/1.0.0") + } + + metric := sm.Metrics().AppendEmpty() + setupMetric(metric) + + return metrics +} + +func validateLogRecord(t *testing.T, logs plog.Logs, expectedAttrs map[string]any, checkResource, checkScope bool) { + require.Equal(t, 1, logs.ResourceLogs().Len()) + rl := logs.ResourceLogs().At(0) + + if checkResource { + assert.Equal(t, "test-service", rl.Resource().Attributes().AsRaw()["service.name"]) + assert.Equal(t, "1.0.0", rl.Resource().Attributes().AsRaw()["service.version"]) + assert.Equal(t, "https://opentelemetry.io/schemas/1.0.0", rl.SchemaUrl()) + } + + require.Equal(t, 1, rl.ScopeLogs().Len()) + sl := rl.ScopeLogs().At(0) + + if checkScope { + assert.Equal(t, "test-scope", sl.Scope().Name()) + assert.Equal(t, "1.0.0", sl.Scope().Version()) + assert.Equal(t, "https://opentelemetry.io/schemas/1.0.0", sl.SchemaUrl()) + } + + require.Equal(t, 1, sl.LogRecords().Len()) + lr := sl.LogRecords().At(0) + + // Check log body + assert.Equal(t, "metric converted to log", lr.Body().AsString()) + + // Check all expected attributes + for key, expected := range expectedAttrs { + actual := lr.Attributes().AsRaw()[key] + assert.Equal(t, expected, actual, "attribute %s mismatch", key) + } +} + +func getLogRecord(t *testing.T, logs plog.Logs) plog.LogRecord { + require.Equal(t, 1, logs.ResourceLogs().Len()) + rl := logs.ResourceLogs().At(0) + require.Equal(t, 1, rl.ScopeLogs().Len()) + sl := rl.ScopeLogs().At(0) + require.Equal(t, 1, sl.LogRecords().Len()) + return sl.LogRecords().At(0) +} diff --git a/connector/metricsaslogsconnector/doc.go b/connector/metricsaslogsconnector/doc.go new file mode 100644 index 0000000000000..9c12b756a9a0f --- /dev/null +++ b/connector/metricsaslogsconnector/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + 
+//go:generate mdatagen metadata.yaml + +package metricsaslogsconnector // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector" diff --git a/connector/metricsaslogsconnector/factory.go b/connector/metricsaslogsconnector/factory.go new file mode 100644 index 0000000000000..599cb5b9c650a --- /dev/null +++ b/connector/metricsaslogsconnector/factory.go @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricsaslogsconnector // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" +) + +func NewFactory() connector.Factory { + return xconnector.NewFactory( + component.MustNewType("metricsaslogs"), + createDefaultConfig, + xconnector.WithMetricsToLogs(createMetricsToLogs, component.StabilityLevelAlpha), + ) +} + +func createMetricsToLogs( + _ context.Context, + set connector.Settings, + cfg component.Config, + nextConsumer consumer.Logs, +) (connector.Metrics, error) { + c := cfg.(*Config) + return &metricsAsLogs{ + logsConsumer: nextConsumer, + config: c, + logger: set.Logger, + }, nil +} diff --git a/connector/metricsaslogsconnector/factory_test.go b/connector/metricsaslogsconnector/factory_test.go new file mode 100644 index 0000000000000..90bf5b9d9a58e --- /dev/null +++ b/connector/metricsaslogsconnector/factory_test.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricsaslogsconnector + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/connectortest" + "go.opentelemetry.io/collector/consumer/consumertest" +) + 
+func TestNewFactory(t *testing.T) { + factory := NewFactory() + + assert.NotNil(t, factory) + assert.Equal(t, component.MustNewType("metricsaslogs"), factory.Type()) + assert.Equal(t, component.StabilityLevelAlpha, factory.MetricsToLogsStability()) +} + +func TestCreateMetricsToLogs(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + set := connectortest.NewNopSettings(component.MustNewType("metricsaslogs")) + consumer := &consumertest.LogsSink{} + + connector, err := factory.CreateMetricsToLogs( + t.Context(), + set, + cfg, + consumer, + ) + + require.NoError(t, err) + assert.NotNil(t, connector) + assert.IsType(t, &metricsAsLogs{}, connector) + + // Verify the connector is properly configured + mal := connector.(*metricsAsLogs) + assert.Equal(t, consumer, mal.logsConsumer) + assert.Equal(t, cfg, mal.config) + assert.NotNil(t, mal.logger) +} diff --git a/connector/metricsaslogsconnector/generated_component_test.go b/connector/metricsaslogsconnector/generated_component_test.go new file mode 100644 index 0000000000000..d802c83f3d40d --- /dev/null +++ b/connector/metricsaslogsconnector/generated_component_test.go @@ -0,0 +1,90 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metricsaslogsconnector + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/connectortest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pipeline" +) + +var typ = component.MustNewType("metricsaslogs") + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, typ, NewFactory().Type()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + createFn func(ctx context.Context, set connector.Settings, cfg component.Config) (component.Component, error) + name string + }{ + + { + name: "metrics_to_logs", + createFn: func(ctx context.Context, set connector.Settings, cfg component.Config) (component.Component, error) { + router := connector.NewLogsRouter(map[pipeline.ID]consumer.Logs{pipeline.NewID(pipeline.SignalLogs): consumertest.NewNop()}) + return factory.CreateMetricsToLogs(ctx, set, cfg, router) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), connectortest.NewNopSettings(typ), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstConnector, err := tt.createFn(context.Background(), 
connectortest.NewNopSettings(typ), cfg) + require.NoError(t, err) + host := newMdatagenNopHost() + require.NoError(t, err) + require.NoError(t, firstConnector.Start(context.Background(), host)) + require.NoError(t, firstConnector.Shutdown(context.Background())) + secondConnector, err := tt.createFn(context.Background(), connectortest.NewNopSettings(typ), cfg) + require.NoError(t, err) + require.NoError(t, secondConnector.Start(context.Background(), host)) + require.NoError(t, secondConnector.Shutdown(context.Background())) + }) + } +} + +var _ component.Host = (*mdatagenNopHost)(nil) + +type mdatagenNopHost struct{} + +func newMdatagenNopHost() component.Host { + return &mdatagenNopHost{} +} + +func (mnh *mdatagenNopHost) GetExtensions() map[component.ID]component.Component { + return nil +} + +func (mnh *mdatagenNopHost) GetFactory(_ component.Kind, _ component.Type) component.Factory { + return nil +} diff --git a/connector/metricsaslogsconnector/generated_package_test.go b/connector/metricsaslogsconnector/generated_package_test.go new file mode 100644 index 0000000000000..00613f2134794 --- /dev/null +++ b/connector/metricsaslogsconnector/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metricsaslogsconnector + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/connector/metricsaslogsconnector/go.mod b/connector/metricsaslogsconnector/go.mod new file mode 100644 index 0000000000000..157006b2662ab --- /dev/null +++ b/connector/metricsaslogsconnector/go.mod @@ -0,0 +1,62 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector + +go 1.24.0 + +require ( + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/collector/component v1.43.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/component/componenttest v0.137.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/confmap v1.43.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/connector v0.137.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/connector/connectortest v0.137.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/connector/xconnector v0.137.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/consumer v1.43.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/consumer/consumertest v0.137.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/pdata v1.43.1-0.20251013162618-a96eab114ea4 + go.opentelemetry.io/collector/pipeline v1.43.1-0.20251013162618-a96eab114ea4 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/knadh/koanf/providers/confmap v1.0.0 // 
indirect + github.com/knadh/koanf/v2 v2.3.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.137.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/collector/featuregate v1.43.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.137.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.137.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.137.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.137.1-0.20251013162618-a96eab114ea4 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.42.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.27.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/grpc v1.76.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/connector/metricsaslogsconnector/go.sum b/connector/metricsaslogsconnector/go.sum new file mode 100644 index 0000000000000..cc784039b54f0 --- 
/dev/null +++ b/connector/metricsaslogsconnector/go.sum @@ -0,0 +1,167 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version 
v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= +github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= +github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM= +github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v1.43.1-0.20251013162618-a96eab114ea4 h1:w1VjKgmFktbRwdt5L1C4j6jL60sLhThh9dRRBqArUiA= +go.opentelemetry.io/collector/component v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:LJ8w25mRyV1axguFAwX6NxKzh0sXK4pYVOn3dJvfVuk= +go.opentelemetry.io/collector/component/componenttest v0.137.1-0.20251013162618-a96eab114ea4 h1:GDUlSEmbp4yCEhOym78zazGX/5Cp8/OGfFNtk7M+T1w= +go.opentelemetry.io/collector/component/componenttest v0.137.1-0.20251013162618-a96eab114ea4/go.mod 
h1:08xR/WnVzcz8dz4TfPidNfQ6GsZ//mp9g6RvXgBMO/Q= +go.opentelemetry.io/collector/confmap v1.43.1-0.20251013162618-a96eab114ea4 h1:3BgozNc4bsTXC+SCXpxpx4SODMp0E7YlWIRR1lh+hw0= +go.opentelemetry.io/collector/confmap v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:N5GZpFCmwD1GynDu3IWaZW5Ycfc/7YxSU0q1/E3vLdg= +go.opentelemetry.io/collector/connector v0.137.1-0.20251013162618-a96eab114ea4 h1:W98PYYhj00HnLuT+z9QDL/ucXrn2HpoHgDlIqktE/lQ= +go.opentelemetry.io/collector/connector v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:ipm2LscGOkEYLRSh9o/Rvna/2TaXw21SCSQDrinCPL0= +go.opentelemetry.io/collector/connector/connectortest v0.137.1-0.20251013162618-a96eab114ea4 h1:bS/fJQEHbwYvlP+wpgyrTOYS8VffddEgCil0Tbtk6VQ= +go.opentelemetry.io/collector/connector/connectortest v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:RpEzK/SdNq2lfWOT3vroWQjvB7mQdfJMphdOPWMJTFI= +go.opentelemetry.io/collector/connector/xconnector v0.137.1-0.20251013162618-a96eab114ea4 h1:PiM3HICkqHIwbjpXc6d9HyyFx7o7W7vNnxQ+B0OFZZ8= +go.opentelemetry.io/collector/connector/xconnector v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:NAZ7sAOeLJpF1TEG4OBck7tA3gnIfTjoBK+j6RBI6OY= +go.opentelemetry.io/collector/consumer v1.43.1-0.20251013162618-a96eab114ea4 h1:UiudlwgYJGaSmhV3i2RWDgpWaS73B62tRe3Q+blKb3g= +go.opentelemetry.io/collector/consumer v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:wO/uVbt7tB2OsNGD3GdixgjEKL2wKOGrK4hTJ494pYU= +go.opentelemetry.io/collector/consumer/consumertest v0.137.1-0.20251013162618-a96eab114ea4 h1:grNLBP+yj2xSQmlGn0Dp52xZgZEbYp/DB63lgoNK1p4= +go.opentelemetry.io/collector/consumer/consumertest v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:Yz1Mo4ibkgutZRUOf/odYxyrJCnqWPiQ3s9XsESkyZA= +go.opentelemetry.io/collector/consumer/xconsumer v0.137.1-0.20251013162618-a96eab114ea4 h1:t3bFdGBL/gCTXVeKRNd/n9dhl1mKj+upajY6Y5pL1yM= +go.opentelemetry.io/collector/consumer/xconsumer v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:RsSFgyg2HiQHSE0yBFxG1GKm+x/sKtLYNkhwUqQ6ABg= 
+go.opentelemetry.io/collector/featuregate v1.43.1-0.20251013162618-a96eab114ea4 h1:IXK7EGifr3Lic3mnMlkVXFb1HBlkBoYfoy/AYzsevko= +go.opentelemetry.io/collector/featuregate v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.137.1-0.20251013162618-a96eab114ea4 h1:DBkHHU7rC431EmRnnh6HUnjzXF2ttYpvrDzWqVcPZXM= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:rawRluXfYgIWtmTc0UPxJEOqyewKKbNKXzTcqGc9MnE= +go.opentelemetry.io/collector/internal/telemetry v0.137.1-0.20251013162618-a96eab114ea4 h1:E94fKydbFNkR6Zfhix/alvOftJx6e6srYQ8PIr9xYRw= +go.opentelemetry.io/collector/internal/telemetry v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:ui3HnaeyvIe6tpjUFcL70Ev3aw5UxQnzoBGhuXfbbfs= +go.opentelemetry.io/collector/pdata v1.43.1-0.20251013162618-a96eab114ea4 h1:lmaRPm+HIMu/Tz4Ht9/NcGCd7FVOUXBmC3fXDZYN6+E= +go.opentelemetry.io/collector/pdata v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:rhhv1vy8COsKFpXiBtLG8GTRDRjg2DL7JPq4E+xOD5Q= +go.opentelemetry.io/collector/pdata/pprofile v0.137.1-0.20251013162618-a96eab114ea4 h1:fYu+ASUiQojOigzkJxC4A9BONSV0hKwB3TncYPDbjNs= +go.opentelemetry.io/collector/pdata/pprofile v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:+OCHaUJEApvJFn8FRjATMpYUAt3QKnnKjOfF8vxCtzs= +go.opentelemetry.io/collector/pdata/testdata v0.137.0 h1:+oaGvbt0v7xryTX827szmyYWSAtvA0LbysEFV2nFjs0= +go.opentelemetry.io/collector/pdata/testdata v0.137.0/go.mod h1:3512FJaQsZz5EBlrY46xKjzoBc0MoMcQtAqYs2NaRQM= +go.opentelemetry.io/collector/pipeline v1.43.1-0.20251013162618-a96eab114ea4 h1:Fak0kW9jeCaXsdx9lTF8ben4Exnpo3kff9btUc7aXhI= +go.opentelemetry.io/collector/pipeline v1.43.1-0.20251013162618-a96eab114ea4/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= +go.opentelemetry.io/collector/pipeline/xpipeline v0.137.1-0.20251013162618-a96eab114ea4 h1:zAVkJfuNFcHJ514dWdqsom9/X3otXddpcOxsKJmf4+I= 
+go.opentelemetry.io/collector/pipeline/xpipeline v0.137.1-0.20251013162618-a96eab114ea4/go.mod h1:nQmJ9w3UWOwNmaUR1EalDLyswzHfJcBPMm/NmcytH74= +go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 h1:aBKdhLVieqvwWe9A79UHI/0vgp2t/s2euY8X59pGRlw= +go.opentelemetry.io/contrib/bridges/otelzap v0.13.0/go.mod h1:SYqtxLQE7iINgh6WFuVi2AI70148B8EI35DSk0Wr8m4= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/log/logtest v0.14.0 h1:BGTqNeluJDK2uIHAY8lRqxjVAYfqgcaTbVk1n3MWe5A= +go.opentelemetry.io/otel/log/logtest v0.14.0/go.mod h1:IuguGt8XVP4XA4d2oEEDMVDBBCesMg8/tSGWDjuKfoA= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ= +go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI= +go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE= 
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= 
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/connector/metricsaslogsconnector/internal/metadata/generated_status.go b/connector/metricsaslogsconnector/internal/metadata/generated_status.go new file mode 100644 index 0000000000000..4022962374b5d --- /dev/null +++ b/connector/metricsaslogsconnector/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("metricsaslogs") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector" +) + +const ( + MetricsToLogsStability = component.StabilityLevelDevelopment +) diff --git a/connector/metricsaslogsconnector/metadata.yaml b/connector/metricsaslogsconnector/metadata.yaml new file mode 100644 index 0000000000000..449631ad25981 --- /dev/null +++ b/connector/metricsaslogsconnector/metadata.yaml @@ -0,0 +1,9 @@ +type: metricsaslogs + +status: + class: connector + stability: + development: [metrics_to_logs] + distributions: [] + codeowners: + active: [atoulme] \ No newline at end of file diff --git a/internal/tidylist/tidylist.txt b/internal/tidylist/tidylist.txt index f323aaf731e82..d6c24cf5f45ac 100644 --- a/internal/tidylist/tidylist.txt +++ b/internal/tidylist/tidylist.txt @@ -75,6 +75,7 @@ connector/datadogconnector connector/exceptionsconnector connector/failoverconnector connector/grafanacloudconnector +connector/metricsaslogsconnector connector/otlpjsonconnector connector/roundrobinconnector connector/servicegraphconnector diff --git a/versions.yaml b/versions.yaml index 64786c8e610f4..ff838eb77916d 100644 --- a/versions.yaml +++ b/versions.yaml @@ -26,6 +26,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector - github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector - github.com/open-telemetry/opentelemetry-collector-contrib/connector/sumconnector + - github.com/open-telemetry/opentelemetry-collector-contrib/connector/metricsaslogsconnector - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/alertmanagerexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/alibabacloudlogserviceexporter - 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter