Skip to content

Commit 3e7e30a

Browse files
authored
Merge branch 'main' into main
2 parents 1d8f2cb + e3f1205 commit 3e7e30a

File tree

63 files changed

+2405
-390
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

63 files changed

+2405
-390
lines changed

.chloggen/43719.yaml

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# Use this changelog template to create an entry for release notes.
2+
3+
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
4+
change_type: bug_fix
5+
6+
# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog)
7+
component: exporter/loadbalancing
8+
9+
# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
10+
note: "Fix high cardinality issue in loadbalancing exporter by moving endpoint from exporter ID to attributes"
11+
12+
# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
13+
issues: [43719]
14+
15+
# (Optional) One or more lines of additional information to render under the primary note.
16+
# These lines will be padded with 2 spaces and then inserted directly into the document.
17+
# Use pipe (|) for multiline entries.
18+
subtext: |
19+
Previously, the exporter created unique IDs for each backend endpoint by appending the endpoint
20+
to the exporter ID (e.g., loadbalancing_10.11.68.62:4317). This caused high cardinality in metrics,
21+
especially in dynamic environments. Now the endpoint is added as an attribute instead.
22+
23+
# If your change doesn't affect end users or the exported elements of any package,
24+
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
25+
# Optional: The change log or logs in which this entry should be included.
26+
# e.g. '[user]' or '[user, api]'
27+
# Include 'user' if the change is relevant to end users.
28+
# Include 'api' if there is a change to a library API.
29+
# Default: '[user]'
30+
change_logs: [user]
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
change_type: 'enhancement'
2+
component: extension/awslogs_encoding
3+
note: Add support for AWS Network Firewall logs.
4+
issues: [43616]
5+
subtext: The AWS Logs Encoding Extension now supports unmarshaling AWS Network Firewall logs into OpenTelemetry logs format.
6+
change_logs: [user]
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
# Use this changelog template to create an entry for release notes.
2+
3+
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
4+
change_type: enhancement
5+
6+
# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
7+
component: connector/count
8+
9+
# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
10+
note: "Support for setting attributes from scope and resource levels. Precedence order: Span (or Log Record, etc.) > Scope attributes > Resource attributes."
11+
12+
# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
13+
issues: [41859]
14+
15+
# (Optional) One or more lines of additional information to render under the primary note.
16+
# These lines will be padded with 2 spaces and then inserted directly into the document.
17+
# Use pipe (|) for multiline entries.
18+
subtext:
19+
20+
# If your change doesn't affect end users or the exported elements of any package,
21+
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
22+
# Optional: The change log or logs in which this entry should be included.
23+
# e.g. '[user]' or '[user, api]'
24+
# Include 'user' if the change is relevant to end users.
25+
# Include 'api' if there is a change to a library API.
26+
# Default: '[user]'
27+
change_logs: [user]

.github/workflows/build-and-test.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -367,6 +367,7 @@ jobs:
367367
# list of components that require privileged (sudo) access for
368368
# its integration tests `go test -exec sudo -run Sudo -tags=integration`
369369
- extension/cgroupruntimeextension
370+
- receiver/icmpcheckreceiver
370371
runs-on: ubuntu-24.04
371372
needs: [setup-environment]
372373
steps:

cmd/telemetrygen/internal/util/metrics.go

Lines changed: 0 additions & 35 deletions
This file was deleted.

cmd/telemetrygen/pkg/metrics/worker.go

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ import (
2020
"golang.org/x/time/rate"
2121

2222
"github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/common"
23-
"github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/internal/util"
2423
types "github.com/open-telemetry/opentelemetry-collector-contrib/cmd/telemetrygen/pkg"
2524
)
2625

@@ -195,7 +194,7 @@ func (w worker) simulateMetrics(res *resource.Resource, exporter sdkmetric.Expor
195194
Attributes: attribute.NewSet(signalAttrs...),
196195
Exemplars: w.exemplars,
197196
}
198-
util.ExpoHistToSDKExponentialDataPoint(hist, dp)
197+
expoHistToSDKExponentialDataPoint(hist, dp)
199198

200199
metrics = append(metrics, metricdata.Metrics{
201200
Name: w.metricName,
@@ -234,3 +233,29 @@ func (w worker) simulateMetrics(res *resource.Resource, exporter sdkmetric.Expor
234233
w.logger.Info("metrics generated", zap.Int64("metrics", i))
235234
w.wg.Done()
236235
}
236+
237+
// expoHistToSDKExponentialDataPoint copies `lightstep/go-expohisto` structure.Histogram to
238+
// metricdata.ExponentialHistogramDataPoint
239+
func expoHistToSDKExponentialDataPoint(agg *structure.Histogram[float64], dp *metricdata.ExponentialHistogramDataPoint[int64]) {
240+
dp.Count = agg.Count()
241+
dp.Sum = int64(agg.Sum())
242+
dp.ZeroCount = agg.ZeroCount()
243+
dp.Scale = agg.Scale()
244+
dp.ZeroThreshold = 0.0 // go-expohisto doesn't expose ZeroThreshold, use default
245+
246+
// Convert positive buckets
247+
posBuckets := agg.Positive()
248+
dp.PositiveBucket.Offset = posBuckets.Offset()
249+
dp.PositiveBucket.Counts = make([]uint64, posBuckets.Len())
250+
for i := uint32(0); i < posBuckets.Len(); i++ {
251+
dp.PositiveBucket.Counts[i] = posBuckets.At(i)
252+
}
253+
254+
// Convert negative buckets
255+
negBuckets := agg.Negative()
256+
dp.NegativeBucket.Offset = negBuckets.Offset()
257+
dp.NegativeBucket.Counts = make([]uint64, negBuckets.Len())
258+
for i := uint32(0); i < negBuckets.Len(); i++ {
259+
dp.NegativeBucket.Counts[i] = negBuckets.At(i)
260+
}
261+
}

connector/countconnector/README.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,8 @@ connectors:
9999

100100
#### Attributes
101101

102-
`spans`, `spanevents`, `datapoints`, and `logs` may be counted according to attributes.
102+
`spans`, `spanevents`, `datapoints`, and `logs` may be counted according to attributes. In such cases, attribute precedence follows this order:
103+
span (log record, data point, or profile) attributes > scope attributes > resource attributes.
103104

104105
If attributes are specified for custom metrics, a separate count will be generated for each unique
105106
set of attribute values. Each count will be emitted as a data point on the same metric.

connector/countconnector/connector.go

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -50,24 +50,26 @@ func (c *count) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
5050
countMetrics.ResourceMetrics().EnsureCapacity(td.ResourceSpans().Len())
5151
for i := 0; i < td.ResourceSpans().Len(); i++ {
5252
resourceSpan := td.ResourceSpans().At(i)
53+
resourceAttrs := resourceSpan.Resource().Attributes()
5354
spansCounter := newCounter[ottlspan.TransformContext](c.spansMetricDefs)
5455
spanEventsCounter := newCounter[ottlspanevent.TransformContext](c.spanEventsMetricDefs)
5556

5657
for j := 0; j < resourceSpan.ScopeSpans().Len(); j++ {
5758
scopeSpan := resourceSpan.ScopeSpans().At(j)
59+
scopeAttrs := scopeSpan.Scope().Attributes()
5860

5961
for k := 0; k < scopeSpan.Spans().Len(); k++ {
6062
span := scopeSpan.Spans().At(k)
6163
spansCounter.updateTimestamp(span.StartTimestamp())
6264
spansCounter.updateTimestamp(span.EndTimestamp())
6365
sCtx := ottlspan.NewTransformContext(span, scopeSpan.Scope(), resourceSpan.Resource(), scopeSpan, resourceSpan)
64-
multiError = errors.Join(multiError, spansCounter.update(ctx, span.Attributes(), sCtx))
66+
multiError = errors.Join(multiError, spansCounter.update(ctx, span.Attributes(), scopeAttrs, resourceAttrs, sCtx))
6567

6668
for l := 0; l < span.Events().Len(); l++ {
6769
event := span.Events().At(l)
6870
spanEventsCounter.updateTimestamp(event.Timestamp())
6971
eCtx := ottlspanevent.NewTransformContext(event, span, scopeSpan.Scope(), resourceSpan.Resource(), scopeSpan, resourceSpan)
70-
multiError = errors.Join(multiError, spanEventsCounter.update(ctx, event.Attributes(), eCtx))
72+
multiError = errors.Join(multiError, spanEventsCounter.update(ctx, event.Attributes(), scopeAttrs, resourceAttrs, eCtx))
7173
}
7274
}
7375
}
@@ -98,16 +100,18 @@ func (c *count) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
98100
countMetrics.ResourceMetrics().EnsureCapacity(md.ResourceMetrics().Len())
99101
for i := 0; i < md.ResourceMetrics().Len(); i++ {
100102
resourceMetric := md.ResourceMetrics().At(i)
103+
resourceAttrs := resourceMetric.Resource().Attributes()
101104
metricsCounter := newCounter[ottlmetric.TransformContext](c.metricsMetricDefs)
102105
dataPointsCounter := newCounter[ottldatapoint.TransformContext](c.dataPointsMetricDefs)
103106

104107
for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ {
105108
scopeMetrics := resourceMetric.ScopeMetrics().At(j)
109+
scopeAttrs := scopeMetrics.Scope().Attributes()
106110

107111
for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
108112
metric := scopeMetrics.Metrics().At(k)
109113
mCtx := ottlmetric.NewTransformContext(metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
110-
multiError = errors.Join(multiError, metricsCounter.update(ctx, pcommon.NewMap(), mCtx))
114+
multiError = errors.Join(multiError, metricsCounter.update(ctx, pcommon.NewMap(), scopeAttrs, resourceAttrs, mCtx))
111115

112116
//exhaustive:enforce
113117
switch metric.Type() {
@@ -117,39 +121,39 @@ func (c *count) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
117121
dp := dps.At(i)
118122
dataPointsCounter.updateTimestamp(dp.Timestamp())
119123
dCtx := ottldatapoint.NewTransformContext(dp, metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
120-
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), dCtx))
124+
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), scopeAttrs, resourceAttrs, dCtx))
121125
}
122126
case pmetric.MetricTypeSum:
123127
dps := metric.Sum().DataPoints()
124128
for i := 0; i < dps.Len(); i++ {
125129
dp := dps.At(i)
126130
dataPointsCounter.updateTimestamp(dp.Timestamp())
127131
dCtx := ottldatapoint.NewTransformContext(dp, metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
128-
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), dCtx))
132+
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), scopeAttrs, resourceAttrs, dCtx))
129133
}
130134
case pmetric.MetricTypeSummary:
131135
dps := metric.Summary().DataPoints()
132136
for i := 0; i < dps.Len(); i++ {
133137
dp := dps.At(i)
134138
dataPointsCounter.updateTimestamp(dp.Timestamp())
135139
dCtx := ottldatapoint.NewTransformContext(dp, metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
136-
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), dCtx))
140+
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), scopeAttrs, resourceAttrs, dCtx))
137141
}
138142
case pmetric.MetricTypeHistogram:
139143
dps := metric.Histogram().DataPoints()
140144
for i := 0; i < dps.Len(); i++ {
141145
dp := dps.At(i)
142146
dataPointsCounter.updateTimestamp(dp.Timestamp())
143147
dCtx := ottldatapoint.NewTransformContext(dp, metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
144-
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), dCtx))
148+
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), scopeAttrs, resourceAttrs, dCtx))
145149
}
146150
case pmetric.MetricTypeExponentialHistogram:
147151
dps := metric.ExponentialHistogram().DataPoints()
148152
for i := 0; i < dps.Len(); i++ {
149153
dp := dps.At(i)
150154
dataPointsCounter.updateTimestamp(dp.Timestamp())
151155
dCtx := ottldatapoint.NewTransformContext(dp, metric, scopeMetrics.Metrics(), scopeMetrics.Scope(), resourceMetric.Resource(), scopeMetrics, resourceMetric)
152-
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), dCtx))
156+
multiError = errors.Join(multiError, dataPointsCounter.update(ctx, dp.Attributes(), scopeAttrs, resourceAttrs, dCtx))
153157
}
154158
case pmetric.MetricTypeEmpty:
155159
multiError = errors.Join(multiError, fmt.Errorf("metric %q: invalid metric type: %v", metric.Name(), metric.Type()))
@@ -183,16 +187,18 @@ func (c *count) ConsumeLogs(ctx context.Context, ld plog.Logs) error {
183187
countMetrics.ResourceMetrics().EnsureCapacity(ld.ResourceLogs().Len())
184188
for i := 0; i < ld.ResourceLogs().Len(); i++ {
185189
resourceLog := ld.ResourceLogs().At(i)
190+
resourceAttrs := resourceLog.Resource().Attributes()
186191
counter := newCounter[ottllog.TransformContext](c.logsMetricDefs)
187192

188193
for j := 0; j < resourceLog.ScopeLogs().Len(); j++ {
189194
scopeLogs := resourceLog.ScopeLogs().At(j)
195+
scopeAttrs := scopeLogs.Scope().Attributes()
190196

191197
for k := 0; k < scopeLogs.LogRecords().Len(); k++ {
192198
logRecord := scopeLogs.LogRecords().At(k)
193199
counter.updateTimestamp(logRecord.Timestamp())
194200
lCtx := ottllog.NewTransformContext(logRecord, scopeLogs.Scope(), resourceLog.Resource(), scopeLogs, resourceLog)
195-
multiError = errors.Join(multiError, counter.update(ctx, logRecord.Attributes(), lCtx))
201+
multiError = errors.Join(multiError, counter.update(ctx, logRecord.Attributes(), scopeAttrs, resourceAttrs, lCtx))
196202
}
197203
}
198204

@@ -221,17 +227,19 @@ func (c *count) ConsumeProfiles(ctx context.Context, ld pprofile.Profiles) error
221227
countMetrics.ResourceMetrics().EnsureCapacity(ld.ResourceProfiles().Len())
222228
for i := 0; i < ld.ResourceProfiles().Len(); i++ {
223229
resourceProfile := ld.ResourceProfiles().At(i)
230+
resourceAttrs := resourceProfile.Resource().Attributes()
224231
counter := newCounter[ottlprofile.TransformContext](c.profilesMetricDefs)
225232

226233
for j := 0; j < resourceProfile.ScopeProfiles().Len(); j++ {
227234
scopeProfile := resourceProfile.ScopeProfiles().At(j)
235+
scopeAttrs := scopeProfile.Scope().Attributes()
228236

229237
for k := 0; k < scopeProfile.Profiles().Len(); k++ {
230238
profile := scopeProfile.Profiles().At(k)
231239
counter.updateTimestamp(profile.Time())
232240
pCtx := ottlprofile.NewTransformContext(profile, ld.Dictionary(), scopeProfile.Scope(), resourceProfile.Resource(), scopeProfile, resourceProfile)
233241
attributes := pprofile.FromAttributeIndices(ld.Dictionary().AttributeTable(), profile, ld.Dictionary())
234-
multiError = errors.Join(multiError, counter.update(ctx, attributes, pCtx))
242+
multiError = errors.Join(multiError, counter.update(ctx, attributes, scopeAttrs, resourceAttrs, pCtx))
235243
}
236244
}
237245

connector/countconnector/counter.go

Lines changed: 34 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import (
1111
"go.opentelemetry.io/collector/pdata/pcommon"
1212
"go.opentelemetry.io/collector/pdata/pmetric"
1313

14+
utilattri "github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil"
1415
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
1516
)
1617

@@ -35,34 +36,46 @@ type attrCounter struct {
3536
count uint64
3637
}
3738

38-
func (c *counter[K]) update(ctx context.Context, attrs pcommon.Map, tCtx K) error {
39+
func (c *counter[K]) update(ctx context.Context, attrs, scopeAttrs, resourceAttrs pcommon.Map, tCtx K) error {
3940
var multiError error
4041
for name, md := range c.metricDefs {
4142
countAttrs := pcommon.NewMap()
4243
for _, attr := range md.attrs {
43-
if attrVal, ok := attrs.Get(attr.Key); ok {
44-
switch typeAttr := attrVal.Type(); typeAttr {
44+
dimension := utilattri.Dimension{
45+
Name: attr.Key,
46+
Value: func() *pcommon.Value {
47+
if attr.DefaultValue != nil {
48+
switch v := attr.DefaultValue.(type) {
49+
case string:
50+
if v != "" {
51+
strV := pcommon.NewValueStr(v)
52+
return &strV
53+
}
54+
case int:
55+
if v != 0 {
56+
intV := pcommon.NewValueInt(int64(v))
57+
return &intV
58+
}
59+
case float64:
60+
if v != 0 {
61+
floatV := pcommon.NewValueDouble(v)
62+
return &floatV
63+
}
64+
}
65+
}
66+
67+
return nil
68+
}(),
69+
}
70+
value, ok := utilattri.GetDimensionValue(dimension, attrs, scopeAttrs, resourceAttrs)
71+
if ok {
72+
switch value.Type() {
4573
case pcommon.ValueTypeInt:
46-
countAttrs.PutInt(attr.Key, attrVal.Int())
74+
countAttrs.PutInt(attr.Key, value.Int())
4775
case pcommon.ValueTypeDouble:
48-
countAttrs.PutDouble(attr.Key, attrVal.Double())
76+
countAttrs.PutDouble(attr.Key, value.Double())
4977
default:
50-
countAttrs.PutStr(attr.Key, attrVal.Str())
51-
}
52-
} else if attr.DefaultValue != nil {
53-
switch v := attr.DefaultValue.(type) {
54-
case string:
55-
if v != "" {
56-
countAttrs.PutStr(attr.Key, v)
57-
}
58-
case int:
59-
if v != 0 {
60-
countAttrs.PutInt(attr.Key, int64(v))
61-
}
62-
case float64:
63-
if v != 0 {
64-
countAttrs.PutDouble(attr.Key, float64(v))
65-
}
78+
countAttrs.PutStr(attr.Key, value.Str())
6679
}
6780
}
6881
}

0 commit comments

Comments (0)