diff --git a/.chloggen/celian-garcia_39417.yaml b/.chloggen/celian-garcia_39417.yaml
new file mode 100644
index 0000000000000..cd9cc8acb124e
--- /dev/null
+++ b/.chloggen/celian-garcia_39417.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: receiver/azuremonitor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: parallelize calls by subscriptions in Batch API mode
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [39417]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/receiver/azuremonitorreceiver/concurrency.go b/receiver/azuremonitorreceiver/concurrency.go
new file mode 100644
index 0000000000000..86b725b8a51f3
--- /dev/null
+++ b/receiver/azuremonitorreceiver/concurrency.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package azuremonitorreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azuremonitorreceiver"
+
+import (
+	"sync"
+
+	cmap "github.com/orcaman/concurrent-map/v2"
+)
+
+type concurrentMetricsBuilderMap[V any] interface {
+	Get(string) (V, bool)
+	Set(string, V)
+	Clear()
+	Range(func(string, V))
+}
+
+// Implementation with concurrent-map (generic API)
+type concurrentMapImpl[V any] struct {
+	m cmap.ConcurrentMap[string, V]
+}
+
+func newConcurrentMapImpl[V any]() concurrentMetricsBuilderMap[V] {
+	return &concurrentMapImpl[V]{m: cmap.New[V]()}
+}
+
+func (c *concurrentMapImpl[V]) Get(key string) (V, bool) {
+	return c.m.Get(key)
+}
+
+func (c *concurrentMapImpl[V]) Set(key string, value V) {
+	c.m.Set(key, value)
+}
+
+func (c *concurrentMapImpl[V]) Clear() {
+	c.m.Clear()
+}
+
+func (c *concurrentMapImpl[V]) Range(f func(string, V)) {
+	c.m.IterCb(f)
+}
+
+// Implementation with sync.Map
+
+type syncMapImpl[V any] struct {
+	m sync.Map
+}
+
+func newSyncMapImpl[V any]() concurrentMetricsBuilderMap[V] {
+	return &syncMapImpl[V]{}
+}
+
+func (s *syncMapImpl[V]) Get(key string) (V, bool) {
+	v, ok := s.m.Load(key)
+	if !ok {
+		var zero V
+		return zero, false
+	}
+	return v.(V), true
+}
+
+func (s *syncMapImpl[V]) Set(key string, value V) {
+	s.m.Store(key, value)
+}
+
+func (s *syncMapImpl[V]) Clear() {
+	s.m.Range(func(k, _ any) bool {
+		s.m.Delete(k)
+		return true
+	})
+}
+
+func (s *syncMapImpl[V]) Range(f func(string, V)) {
+	s.m.Range(func(k, v any) bool {
+		f(k.(string), v.(V))
+		return true
+	})
+}
+
+// Implementation with classic map and mutex
+
+type mutexMapImpl[V any] struct {
+	m     map[string]V
+	mutex sync.RWMutex
+}
+
+func newMutexMapImpl[V any]() concurrentMetricsBuilderMap[V] {
+	return &mutexMapImpl[V]{m: make(map[string]V)}
+}
+
+func (mm *mutexMapImpl[V]) Get(key string) (V, bool) {
+	mm.mutex.RLock()
+	defer mm.mutex.RUnlock()
+	v, ok := mm.m[key]
+	return v, ok
+}
+
+func (mm *mutexMapImpl[V]) Set(key string, value V) {
+	mm.mutex.Lock()
+	defer mm.mutex.Unlock()
+	mm.m[key] = value
+}
+
+func (mm *mutexMapImpl[V]) Clear() {
+	mm.mutex.Lock()
+	defer mm.mutex.Unlock()
+	mm.m = make(map[string]V)
+}
+
+func (mm *mutexMapImpl[V]) Range(f func(string, V)) {
+	mm.mutex.RLock()
+	defer mm.mutex.RUnlock()
+	for k, v := range mm.m {
+		f(k, v)
+	}
+}
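For orientation, a minimal usage sketch of the interface added above. The `exampleUsage` function is hypothetical (not part of the patch) and assumes it lives in the same package:

```go
// Hypothetical helper, not part of the patch: the three constructors are
// interchangeable behind concurrentMetricsBuilderMap, so callers can swap
// the backing implementation without changing call sites.
func exampleUsage() {
	m := newMutexMapImpl[int]() // or newConcurrentMapImpl / newSyncMapImpl
	m.Set("subscriptionId1", 42)
	if v, ok := m.Get("subscriptionId1"); ok {
		_ = v // 42
	}
	m.Range(func(k string, v int) {
		_, _ = k, v // visits every entry, in no particular order
	})
	m.Clear() // drops all entries, e.g. between scrapes
}
```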
diff --git a/receiver/azuremonitorreceiver/concurrency_bench_report.md b/receiver/azuremonitorreceiver/concurrency_bench_report.md
new file mode 100644
index 0000000000000..5a1915d997555
--- /dev/null
+++ b/receiver/azuremonitorreceiver/concurrency_bench_report.md
@@ -0,0 +1,38 @@
+# Concurrency Map Benchmark Report
+
+## Context
+This benchmark compares three concurrent map implementations in Go:
+- **concurrentMapImpl**: Based on github.com/orcaman/concurrent-map (generic API)
+- **syncMapImpl**: Based on Go's built-in sync.Map
+- **mutexMapImpl**: Classic map protected by sync.RWMutex
+
+Benchmarks were run with both a small dataset (random keys) and a large one (1 million pre-filled entries), using parallel Set/Get operations and multiple CPU counts (1, 2, 4, 8).
+
+## Results Summary
+
+### Small Dataset (Random keys)
+- **concurrentMapImpl**: Fastest, minimal memory usage, scales well with CPU count.
+- **syncMapImpl**: Slowest, highest memory allocation; scales with CPU count but remains less efficient.
+- **mutexMapImpl**: Intermediate performance, low memory usage, slightly less scalable with more CPUs.
+
+### Large Dataset (1 million entries)
+- **concurrentMapImpl**: Remains fastest, especially with 8 CPUs. Memory usage stays low (32–54 B/op, 1 alloc/op).
+- **syncMapImpl**: Still the least efficient overall, with high memory allocation (107–110 B/op, 4 allocs/op), even where its latency is competitive.
+- **mutexMapImpl**: Good for moderate concurrency, memory usage low, but performance drops as CPU count grows.
+
+## Recommendations
+- For high concurrency and large datasets, **concurrentMapImpl** is the best choice.
+- For simple or low-concurrency use cases, **mutexMapImpl** is efficient and easy to maintain.
+- **syncMapImpl** is not recommended for performance-critical scenarios due to its allocation overhead.
+
+## Example Benchmark Output
+In the sample below (8 CPUs, large dataset), sync.Map matches concurrent-map on latency but allocates roughly four times as much per operation, while the mutex-protected map falls behind on latency:
+```
+BenchmarkConcurrentMapImplLarge-8    341.9 ns/op    32 B/op    1 allocs/op
+BenchmarkSyncMapImplLarge-8          342.1 ns/op   107 B/op    4 allocs/op
+BenchmarkMutexMapImplLarge-8         748.2 ns/op    31 B/op    1 allocs/op
+```
+
+## Conclusion
+The generic concurrent-map implementation offers the best performance and scalability for concurrent workloads in Go. The classic mutex-protected map is a good fallback for simpler cases. Avoid sync.Map for intensive workloads.
+
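The numbers above come from the benchmarks added in `concurrency_test.go` below; a typical invocation to reproduce them (flag values illustrative) would be:

```
go test -run='^$' -bench=MapImpl -benchmem -cpu=1,2,4,8 ./receiver/azuremonitorreceiver/
```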
diff --git a/receiver/azuremonitorreceiver/concurrency_test.go b/receiver/azuremonitorreceiver/concurrency_test.go
new file mode 100644
index 0000000000000..aa460de3eab28
--- /dev/null
+++ b/receiver/azuremonitorreceiver/concurrency_test.go
@@ -0,0 +1,73 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package azuremonitorreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azuremonitorreceiver"
+
+import (
+	"math/rand/v2"
+	"strconv"
+	"testing"
+)
+
+func benchmarkMapImpl(b *testing.B, m concurrentMetricsBuilderMap[int]) {
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			key := strconv.Itoa(rand.IntN(100000))
+			m.Set(key, rand.Int())
+			_, _ = m.Get(key)
+		}
+	})
+}
+
+func BenchmarkConcurrentMapImpl(b *testing.B) {
+	m := newConcurrentMapImpl[int]()
+	benchmarkMapImpl(b, m)
+}
+
+func BenchmarkSyncMapImpl(b *testing.B) {
+	m := newSyncMapImpl[int]()
+	benchmarkMapImpl(b, m)
+}
+
+func BenchmarkMutexMapImpl(b *testing.B) {
+	m := newMutexMapImpl[int]()
+	benchmarkMapImpl(b, m)
+}
+
+func benchmarkMapImplLarge(b *testing.B, m concurrentMetricsBuilderMap[int]) {
+	// Pre-fill with 1 million entries
+	for i := 0; i < 1_000_000; i++ {
+		key := strconv.Itoa(i)
+		m.Set(key, i)
+	}
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			// Randomly access existing and new keys
+			if rand.IntN(2) == 0 {
+				key := strconv.Itoa(rand.IntN(1_000_000)) // existing
+				m.Set(key, rand.Int())
+				_, _ = m.Get(key)
+			} else {
+				key := strconv.Itoa(rand.IntN(10_000_000)) // possibly new
+				m.Set(key, rand.Int())
+				_, _ = m.Get(key)
+			}
+		}
+	})
+}
+
+func BenchmarkConcurrentMapImplLarge(b *testing.B) {
+	m := newConcurrentMapImpl[int]()
+	benchmarkMapImplLarge(b, m)
+}
+
+func BenchmarkSyncMapImplLarge(b *testing.B) {
+	m := newSyncMapImpl[int]()
+	benchmarkMapImplLarge(b, m)
+}
+
+func BenchmarkMutexMapImplLarge(b *testing.B) {
+	m := newMutexMapImpl[int]()
+	benchmarkMapImplLarge(b, m)
+}
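Assuming the standard Go module workflow, the new dependency these benchmarks rely on is pulled in with the version pinned in the go.mod change below:

```
go get github.com/orcaman/concurrent-map/v2@v2.0.1
```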
diff --git a/receiver/azuremonitorreceiver/go.mod b/receiver/azuremonitorreceiver/go.mod
index 485cf1bae1f46..16b7eb56df839 100644
--- a/receiver/azuremonitorreceiver/go.mod
+++ b/receiver/azuremonitorreceiver/go.mod
@@ -11,7 +11,8 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.3.0
 	github.com/google/go-cmp v0.7.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.137.0
 	github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.137.0
+	github.com/orcaman/concurrent-map/v2 v2.0.1
 	github.com/stretchr/testify v1.11.1
 	go.opentelemetry.io/collector/component v1.43.0
 	go.opentelemetry.io/collector/component/componenttest v0.137.0
diff --git a/receiver/azuremonitorreceiver/go.sum b/receiver/azuremonitorreceiver/go.sum
index 2afe06a7b25ec..3a1d49171d7c0 100644
--- a/receiver/azuremonitorreceiver/go.sum
+++ b/receiver/azuremonitorreceiver/go.sum
@@ -79,6 +79,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
 github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
+github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
diff --git a/receiver/azuremonitorreceiver/options.go b/receiver/azuremonitorreceiver/options.go
index 14638370c5b0d..de759b2bc50db 100644
--- a/receiver/azuremonitorreceiver/options.go
+++ b/receiver/azuremonitorreceiver/options.go
@@ -18,12 +18,12 @@ type ClientOptionsResolver interface {
 }
 
 type clientOptionsResolver struct {
-	armOptions       *arm.ClientOptions
-	azmetricsOptions *azmetrics.ClientOptions
+	cloud cloud.Configuration
 }
 
 // newClientOptionsResolver creates a resolver that will always return the same options.
 // Unlike in the tests where there will be one option by API mock, here we don't need different options for each client.
+// Note that it recreates the options on each call: the options are mutable and may be modified by the client constructor.
 func newClientOptionsResolver(cloudStr string) ClientOptionsResolver {
 	var cloudToUse cloud.Configuration
 	switch cloudStr {
@@ -34,25 +34,35 @@ func newClientOptionsResolver(cloudStr string) ClientOptionsResolver {
 	default:
 		cloudToUse = cloud.AzurePublic
 	}
-	return &clientOptionsResolver{armOptions: &arm.ClientOptions{
-		ClientOptions: azcore.ClientOptions{
-			Cloud: cloudToUse,
-		},
-	}}
+	return &clientOptionsResolver{cloud: cloudToUse}
+}
+
+func (r *clientOptionsResolver) getClientOptions() azcore.ClientOptions {
+	return azcore.ClientOptions{
+		Cloud: r.cloud,
+	}
 }
 
 func (r *clientOptionsResolver) GetArmResourceClientOptions(_ string) *arm.ClientOptions {
-	return r.armOptions
+	return &arm.ClientOptions{
+		ClientOptions: r.getClientOptions(),
+	}
 }
 
 func (r *clientOptionsResolver) GetArmSubscriptionsClientOptions() *arm.ClientOptions {
-	return r.armOptions
+	return &arm.ClientOptions{
+		ClientOptions: r.getClientOptions(),
+	}
 }
 
 func (r *clientOptionsResolver) GetArmMonitorClientOptions() *arm.ClientOptions {
-	return r.armOptions
+	return &arm.ClientOptions{
+		ClientOptions: r.getClientOptions(),
+	}
 }
 
 func (r *clientOptionsResolver) GetAzMetricsClientOptions() *azmetrics.ClientOptions {
-	return r.azmetricsOptions
+	return &azmetrics.ClientOptions{
+		ClientOptions: r.getClientOptions(),
+	}
 }
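Why recreate the options per call: per the patch's own comments, the Azure SDK client constructors may mutate the options they receive, so sharing one `*arm.ClientOptions` across concurrently constructed clients can race. A minimal self-contained sketch of the safe shape (the package and function names are illustrative, not receiver code):

```go
// Package optionssketch is an illustrative stand-alone sketch, not receiver code.
package optionssketch

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
)

// freshOptions returns a new value on every call, so concurrent client
// constructors never share (and thus never race on) the same options struct.
func freshOptions(cfg cloud.Configuration) *arm.ClientOptions {
	return &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Cloud: cfg},
	}
}
```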
diff --git a/receiver/azuremonitorreceiver/options_test.go b/receiver/azuremonitorreceiver/options_test.go
index aaa9dc284b24c..a423467b94167 100644
--- a/receiver/azuremonitorreceiver/options_test.go
+++ b/receiver/azuremonitorreceiver/options_test.go
@@ -111,6 +111,7 @@ func newMockClientOptionsResolver(
 		}
 		armResourcesClientOptions[subID] = &arm.ClientOptions{
 			ClientOptions: azcore.ClientOptions{
+				Cloud:     cloud.AzurePublic, // Ensure the Cloud option is set; this prevents a race condition in the client constructor.
 				Transport: armresourcesfake.NewServerTransport(&resourceServer),
 			},
 		}
@@ -123,6 +124,7 @@ func newMockClientOptionsResolver(
 	}
 	armSubscriptionsClientOptions := &arm.ClientOptions{
 		ClientOptions: azcore.ClientOptions{
+			Cloud:     cloud.AzurePublic, // Ensure the Cloud option is set; this prevents a race condition in the client constructor.
 			Transport: armsubscriptionsfake.NewServerTransport(&subscriptionsServer),
 		},
 	}
@@ -138,6 +140,7 @@ func newMockClientOptionsResolver(
 	}
 	armMonitorClientOptions := &arm.ClientOptions{
 		ClientOptions: azcore.ClientOptions{
+			Cloud:     cloud.AzurePublic, // Ensure the Cloud option is set; this prevents a race condition in the client constructor.
 			Transport: armmonitorfake.NewServerFactoryTransport(&armMonitorServerFactory),
 		},
 	}
@@ -148,6 +151,7 @@ func newMockClientOptionsResolver(
 	}
 	azMetricsClientOptions := &azmetrics.ClientOptions{
 		ClientOptions: azcore.ClientOptions{
+			Cloud:     cloud.AzurePublic, // Ensure the Cloud option is set; this prevents a race condition in the client constructor.
 			Transport: azmetricsfake.NewServerTransport(&azMetricsServer),
 		},
 	}
diff --git a/receiver/azuremonitorreceiver/scraper_batch.go b/receiver/azuremonitorreceiver/scraper_batch.go
index 4f9177449cded..cca0a50fab20a 100644
--- a/receiver/azuremonitorreceiver/scraper_batch.go
+++ b/receiver/azuremonitorreceiver/scraper_batch.go
@@ -37,18 +37,20 @@ type azureType struct {
 func newBatchScraper(conf *Config, settings receiver.Settings) *azureBatchScraper {
 	return &azureBatchScraper{
 		cfg:                   conf,
+		receiverSettings:      settings,
 		settings:              settings.TelemetrySettings,
-		mb:                    metadata.NewMetricsBuilder(conf.MetricsBuilderConfig, settings),
 		mutex:                 &sync.Mutex{},
 		time:                  &timeWrapper{},
 		clientOptionsResolver: newClientOptionsResolver(conf.Cloud),
+		mbs:                   newConcurrentMapImpl[*metadata.MetricsBuilder](),
 	}
 }
 
 type azureBatchScraper struct {
-	cred     azcore.TokenCredential
-	cfg      *Config
-	settings component.TelemetrySettings
+	cred             azcore.TokenCredential
+	cfg              *Config
+	receiverSettings receiver.Settings
+	settings         component.TelemetrySettings
 	// resources on which we'll get attributes. Stored by resource id and subscription id.
 	resources map[string]map[string]*azureResource
 	// resourceTypes on which we'll collect metrics. Stored by resource type and subscription id.
@@ -58,7 +60,7 @@ type azureBatchScraper struct {
 	subscriptionsUpdated time.Time
 	// regions on which we'll collect metrics. Stored by subscription id.
 	regions map[string]map[string]struct{}
-	mb      *metadata.MetricsBuilder
+	mbs     concurrentMetricsBuilderMap[*metadata.MetricsBuilder]
 	mutex   *sync.Mutex
 	time    timeNowIface
@@ -104,39 +106,61 @@ func (s *azureBatchScraper) unloadSubscription(id string) {
 func (s *azureBatchScraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
 	s.getSubscriptions(ctx)
 
-	for subscriptionID, subscription := range s.subscriptions {
-		s.getResourcesAndTypes(ctx, subscriptionID)
+	var subWG sync.WaitGroup
 
-		resourceTypesWithDefinitions := make(chan string)
+	for subID, subscription := range s.subscriptions {
+		s.mbs.Set(subID, metadata.NewMetricsBuilder(s.cfg.MetricsBuilderConfig, s.receiverSettings))
+		subWG.Add(1)
 		go func(subscriptionID string) {
-			defer close(resourceTypesWithDefinitions)
-			for resourceType := range s.resourceTypes[subscriptionID] {
-				s.getResourceMetricsDefinitionsByType(ctx, subscriptionID, resourceType)
-				resourceTypesWithDefinitions <- resourceType
+			defer subWG.Done()
+			s.getResourcesAndTypes(ctx, subscriptionID)
+
+			resourceTypesWithDefinitions := make(chan string)
+			go func() {
+				defer close(resourceTypesWithDefinitions)
+				for resourceType := range s.resourceTypes[subscriptionID] {
+					s.getResourceMetricsDefinitionsByType(ctx, subscriptionID, resourceType)
+					resourceTypesWithDefinitions <- resourceType
+				}
+			}()
+
+			var resourceTypeWG sync.WaitGroup
+			for resourceType := range resourceTypesWithDefinitions {
+				resourceTypeWG.Add(1)
+				go func(subscriptionID, resourceType string) {
+					defer resourceTypeWG.Done()
+					s.getBatchMetricsValues(ctx, subscriptionID, resourceType)
+				}(subscriptionID, resourceType)
 			}
-		}(subscriptionID)
-
-		var wg sync.WaitGroup
-		for resourceType := range resourceTypesWithDefinitions {
-			wg.Add(1)
-			go func(subscriptionID, resourceType string) {
-				defer wg.Done()
-				s.getBatchMetricsValues(ctx, subscriptionID, resourceType)
-			}(subscriptionID, resourceType)
-		}
-		wg.Wait()
+			resourceTypeWG.Wait()
 
-		// Once all metrics has been collected for one subscription, we move to the next.
-		// We need to keep it synchronous to have the subscription id in resource attributes and not metrics attributes.
-		// It can be revamped later if we need to parallelize more, but currently, resource emit is not thread safe.
-		rb := s.mb.NewResourceBuilder()
-		rb.SetAzuremonitorTenantID(s.cfg.TenantID)
-		rb.SetAzuremonitorSubscriptionID(subscriptionID)
-		rb.SetAzuremonitorSubscription(subscription.DisplayName)
-		s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
+			// Once all metrics have been collected for one subscription, we save them in the associated metrics builder.
+			// Having one metrics builder per subscription allows us to collect subscriptions concurrently.
+			// We can then emit them all at once at the end of the scrape, after every subscription has been processed.
+			mb, ok := s.mbs.Get(subID)
+			if !ok {
+				s.settings.Logger.Fatal("error: metrics builder not found for subscription")
+			}
+			rb := mb.NewResourceBuilder()
+			rb.SetAzuremonitorTenantID(s.cfg.TenantID)
+			rb.SetAzuremonitorSubscriptionID(subID)
+			rb.SetAzuremonitorSubscription(subscription.DisplayName)
+			mb.EmitForResource(metadata.WithResource(rb.Emit()))
+		}(subID)
 	}
-	return s.mb.Emit(), nil
+	subWG.Wait()
+
+	resultMetrics := pmetric.NewMetrics()
+	s.mbs.Range(func(_ string, mb *metadata.MetricsBuilder) {
+		metrics := mb.Emit()
+		for _, resourceMetrics := range metrics.ResourceMetrics().All() {
+			resourceMetrics.MoveTo(resultMetrics.ResourceMetrics().AppendEmpty())
+		}
+	})
+
+	s.mbs.Clear()
+	return resultMetrics, nil
 }
 
 // TODO: duplicate
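The control flow above reduces to a fan-out/merge shape; the following self-contained sketch shows that shape under illustrative names (`scrapeAll` and `work` are not receiver code), using the same pdata calls as the patch:

```go
// Illustrative stand-alone sketch, not receiver code.
package scrapesketch

import (
	"sync"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// scrapeAll runs one worker per subscription, each writing into its own
// result slot, then merges all results once the WaitGroup settles.
func scrapeAll(subs []string, work func(sub string) pmetric.Metrics) pmetric.Metrics {
	var wg sync.WaitGroup
	results := make([]pmetric.Metrics, len(subs))
	for i, sub := range subs {
		wg.Add(1)
		go func(i int, sub string) {
			defer wg.Done()
			results[i] = work(sub) // no sharing: each goroutine owns its slot
		}(i, sub)
	}
	wg.Wait()

	merged := pmetric.NewMetrics()
	for _, res := range results {
		for _, rm := range res.ResourceMetrics().All() {
			rm.MoveTo(merged.ResourceMetrics().AppendEmpty())
		}
	}
	return merged
}
```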
@@ -362,6 +386,10 @@ func (s *azureBatchScraper) getBatchMetricsValues(ctx context.Context, subscript
 	if s.cfg.MaximumResourcesPerBatch > 0 {
 		maxPerBatch = s.cfg.MaximumResourcesPerBatch
 	}
+	mb, ok := s.mbs.Get(subscriptionID)
+	if !ok {
+		s.settings.Logger.Fatal("error: metrics builder not found for subscription")
+	}
 
 	for compositeKey, metricsByGrain := range resType.metricsByCompositeKey {
 		now := time.Now().UTC()
@@ -459,7 +487,7 @@ func (s *azureBatchScraper) getBatchMetricsValues(ctx context.Context, subscript
 					for i := len(timeseriesElement.Data) - 1; i >= 0; i-- { // reverse for loop because newest timestamp is at the end of the slice
 						metricValue := timeseriesElement.Data[i]
 						if metricValueIsNotEmpty(metricValue) {
-							s.processQueryTimeseriesData(resID, metric, metricValue, attributes)
+							s.processQueryTimeseriesData(mb, resID, metric, metricValue, attributes)
 							break
 						}
 					}
@@ -501,6 +529,7 @@ func metricValueIsNotEmpty(metricValue azmetrics.MetricValue) bool {
 }
 
 func (s *azureBatchScraper) processQueryTimeseriesData(
+	mb *metadata.MetricsBuilder,
 	resourceID string,
 	metric azmetrics.Metric,
 	metricValue azmetrics.MetricValue,
@@ -523,7 +552,7 @@ func (s *azureBatchScraper) processQueryTimeseriesData(
 	}
 	for _, aggregation := range aggregationsData {
 		if aggregation.value != nil {
-			s.mb.AddDataPoint(
+			mb.AddDataPoint(
 				resourceID,
 				*metric.Name.Value,
 				aggregation.name,
diff --git a/receiver/azuremonitorreceiver/scraper_batch_test.go b/receiver/azuremonitorreceiver/scraper_batch_test.go
index 52158dfdfd46f..3bbbaca6c7299 100644
--- a/receiver/azuremonitorreceiver/scraper_batch_test.go
+++ b/receiver/azuremonitorreceiver/scraper_batch_test.go
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 
 package azuremonitorreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azuremonitorreceiver"
+
 import (
 	"context"
 	"net/http"
@@ -295,29 +296,18 @@ func TestAzureScraperBatchScrape(t *testing.T) {
 
 			s := &azureBatchScraper{
 				cfg:                   tt.fields.cfg,
-				mb:                    metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), settings),
+				mbs:                   newConcurrentMapImpl[*metadata.MetricsBuilder](),
 				mutex:                 &sync.Mutex{},
 				time:                  getTimeMock(),
 				clientOptionsResolver: optionsResolver,
+				receiverSettings:      settings,
 				settings:              settings.TelemetrySettings,
 				// From there, initialize everything that is normally initialized in start() func
-				subscriptions: map[string]*azureSubscription{
-					"subscriptionId1": {SubscriptionID: "subscriptionId1"},
-					"subscriptionId3": {SubscriptionID: "subscriptionId3"},
-				},
-				resources: map[string]map[string]*azureResource{
-					"subscriptionId1": {},
-					"subscriptionId3": {},
-				},
-				regions: map[string]map[string]struct{}{
-					"subscriptionId1": {"location1": {}},
-					"subscriptionId3": {"location1": {}},
-				},
-				resourceTypes: map[string]map[string]*azureType{
-					"subscriptionId1": {},
-					"subscriptionId3": {},
-				},
+				subscriptions: map[string]*azureSubscription{},
+				resources:     map[string]map[string]*azureResource{},
+				regions:       map[string]map[string]struct{}{},
+				resourceTypes: map[string]map[string]*azureType{},
 			}
 
 			metrics, err := s.scrape(tt.args.ctx)