From 38a4016bddf860f0eddc32c2543452a99ef2f834 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Thu, 2 Nov 2023 16:41:45 -0400 Subject: [PATCH] Implement AppSignals on EKS and native EC2 (#929) Co-authored-by: Jason Polanco Co-authored-by: Ping Xiang <64551395+pxaws@users.noreply.github.com> Co-authored-by: nanzhenAWS <133444984+nanzhenAWS@users.noreply.github.com> Co-authored-by: Harry Co-authored-by: Hyunsoo Kim <884273+movence@users.noreply.github.com> Co-authored-by: Mahad Janjua <134644284+majanjua-amzn@users.noreply.github.com> Co-authored-by: Mengyi Zhou (bjrara) Co-authored-by: Thomas Pierce Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Github Action Co-authored-by: Kaushik Surya <108111936+sky333999@users.noreply.github.com> Co-authored-by: Dinakar Chappa --- .github/workflows/otel-fork-replace.yml | 4 + go.mod | 15 +- go.sum | 22 +- plugins/processors/awsappsignals/README.md | 130 ++ plugins/processors/awsappsignals/config.go | 18 + .../processors/awsappsignals/config_test.go | 104 ++ plugins/processors/awsappsignals/factory.go | 97 ++ .../internal/attributes/attributes.go | 28 + .../normalizer/attributesnormalizer.go | 109 ++ .../normalizer/attributesnormalizer_test.go | 105 ++ .../internal/resolver/attributesresolver.go | 90 ++ .../resolver/attributesresolver_test.go | 67 + .../awsappsignals/internal/resolver/eks.go | 702 ++++++++++ .../internal/resolver/eks_test.go | 1131 +++++++++++++++++ plugins/processors/awsappsignals/processor.go | 269 ++++ .../processors/awsappsignals/rules/common.go | 120 ++ .../awsappsignals/rules/common_test.go | 23 + .../processors/awsappsignals/rules/dropper.go | 35 + .../awsappsignals/rules/dropper_test.go | 180 +++ .../processors/awsappsignals/rules/keeper.go | 35 + .../awsappsignals/rules/keeper_test.go | 175 +++ .../awsappsignals/rules/replacer.go | 54 + .../awsappsignals/rules/replacer_test.go | 281 ++++ .../awsappsignals/testdata/config.yaml | 29 + service/defaultcomponents/components.go | 6 + service/defaultcomponents/components_test.go | 6 +- translator/config/schema.json | 83 +- .../appsignals_and_kubernetes_config.conf | 27 + .../appsignals_and_kubernetes_config.json | 24 + .../appsignals_and_kubernetes_config.yaml | 664 ++++++++++ .../sampleConfig/base_appsignals_config.conf | 27 + .../sampleConfig/base_appsignals_config.json | 17 + .../sampleConfig/base_appsignals_config.yaml | 466 +++++++ translator/tocwconfig/tocwconfig_test.go | 22 + .../translate/otel/common/appsignals.go | 13 + .../translate/otel/common/appsignals_test.go | 16 + translator/translate/otel/common/common.go | 26 + .../awsemf/appsignals_config_eks.yaml | 36 + .../awsemf/appsignals_config_generic.yaml | 31 + .../otel/exporter/awsemf/translator.go | 12 + .../otel/exporter/awsemf/translator_test.go | 54 + .../otel/exporter/awsxray/translator.go | 14 + .../otel/exporter/awsxray/translator_test.go | 29 + .../otel/extension/awsproxy/translator.go | 37 + .../extension/awsproxy/translator_test.go | 26 + .../otel/pipeline/appsignals/translator.go | 63 + .../pipeline/appsignals/translator_test.go | 121 ++ .../awsappsignals/testdata/config_eks.yaml | 1 + .../testdata/config_generic.yaml | 1 + .../testdata/invalidRulesConfig.json | 20 + .../testdata/validRulesConfig.json | 63 + .../testdata/validRulesConfigEKS.yaml | 28 + .../testdata/validRulesConfigGeneric.yaml | 28 + .../processor/awsappsignals/translator.go | 128 ++ .../awsappsignals/translator_test.go | 105 ++ .../resourcedetection/configs/config.yaml | 6 + 
.../processor/resourcedetection/translator.go | 61 + .../resourcedetection/translator_test.go | 60 + .../otel/receiver/otlp/appsignals_config.yaml | 5 + .../otel/receiver/otlp/translator.go | 20 +- .../otel/receiver/otlp/translator_test.go | 44 + translator/translate/otel/translate_otel.go | 3 + .../translate/otel/translate_otel_test.go | 32 + 63 files changed, 6238 insertions(+), 10 deletions(-) create mode 100644 plugins/processors/awsappsignals/README.md create mode 100644 plugins/processors/awsappsignals/config.go create mode 100644 plugins/processors/awsappsignals/config_test.go create mode 100644 plugins/processors/awsappsignals/factory.go create mode 100644 plugins/processors/awsappsignals/internal/attributes/attributes.go create mode 100644 plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go create mode 100644 plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go create mode 100644 plugins/processors/awsappsignals/internal/resolver/attributesresolver.go create mode 100644 plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go create mode 100644 plugins/processors/awsappsignals/internal/resolver/eks.go create mode 100644 plugins/processors/awsappsignals/internal/resolver/eks_test.go create mode 100644 plugins/processors/awsappsignals/processor.go create mode 100644 plugins/processors/awsappsignals/rules/common.go create mode 100644 plugins/processors/awsappsignals/rules/common_test.go create mode 100644 plugins/processors/awsappsignals/rules/dropper.go create mode 100644 plugins/processors/awsappsignals/rules/dropper_test.go create mode 100644 plugins/processors/awsappsignals/rules/keeper.go create mode 100644 plugins/processors/awsappsignals/rules/keeper_test.go create mode 100644 plugins/processors/awsappsignals/rules/replacer.go create mode 100644 plugins/processors/awsappsignals/rules/replacer_test.go create mode 100644 plugins/processors/awsappsignals/testdata/config.yaml create mode 100644 translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.conf create mode 100644 translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.json create mode 100644 translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.yaml create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_config.conf create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_config.json create mode 100644 translator/tocwconfig/sampleConfig/base_appsignals_config.yaml create mode 100644 translator/translate/otel/common/appsignals.go create mode 100644 translator/translate/otel/common/appsignals_test.go create mode 100644 translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml create mode 100644 translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml create mode 100644 translator/translate/otel/extension/awsproxy/translator.go create mode 100644 translator/translate/otel/extension/awsproxy/translator_test.go create mode 100644 translator/translate/otel/pipeline/appsignals/translator.go create mode 100644 translator/translate/otel/pipeline/appsignals/translator_test.go create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json 
create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml create mode 100644 translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml create mode 100644 translator/translate/otel/processor/awsappsignals/translator.go create mode 100644 translator/translate/otel/processor/awsappsignals/translator_test.go create mode 100644 translator/translate/otel/processor/resourcedetection/configs/config.yaml create mode 100644 translator/translate/otel/processor/resourcedetection/translator.go create mode 100644 translator/translate/otel/processor/resourcedetection/translator_test.go create mode 100644 translator/translate/otel/receiver/otlp/appsignals_config.yaml diff --git a/.github/workflows/otel-fork-replace.yml b/.github/workflows/otel-fork-replace.yml index eb56e137d7..553eed7d48 100644 --- a/.github/workflows/otel-fork-replace.yml +++ b/.github/workflows/otel-fork-replace.yml @@ -38,6 +38,10 @@ jobs: git config --global user.name 'Github Action' git config --global user.email 'action@github.com' git checkout -b otel-fork-replace-${{ steps.get-latest-commit.outputs.sha }} + go mod edit -replace go.opentelemetry.io/collector/config/confighttp=github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp@${{ steps.get-latest-commit.outputs.sha }} + go mod tidy + go mod edit -replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor=github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor@${{ steps.get-latest-commit.outputs.sha }} + go mod tidy go mod edit -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter=github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter@${{ steps.get-latest-commit.outputs.sha }} go mod tidy go mod edit -replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter=github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsxrayexporter@${{ steps.get-latest-commit.outputs.sha }} diff --git a/go.mod b/go.mod index 474e871a4f..737b01833d 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,10 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscl replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver => github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.0.0-20231102130031-505e23230a50 +replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor => github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20230928170322-0df38c533713 + +replace go.opentelemetry.io/collector/config/confighttp => github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp v0.0.0-20230928170322-0df38c533713 + replace github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws => github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231102130031-505e23230a50 // Temporary fix, pending PR https://github.com/shirou/gopsutil/pull/957 @@ -98,6 +102,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/eks v1.27.15 github.com/aws/smithy-go v1.15.0 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 + github.com/deckarep/golang-set/v2 v2.3.1 github.com/go-kit/log v0.2.1 github.com/gobwas/glob v0.2.3 github.com/google/cadvisor v0.47.3 // indirect 
@@ -113,10 +118,12 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.84.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.84.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.84.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.84.0 @@ -143,6 +150,7 @@ require ( go.opentelemetry.io/collector/processor/batchprocessor v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/receiver v0.84.1-0.20230908201109-ab3d6c5b6470 go.opentelemetry.io/collector/receiver/otlpreceiver v0.84.0 + go.opentelemetry.io/collector/semconv v0.84.1-0.20230908201109-ab3d6c5b6470 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.25.0 @@ -155,7 +163,7 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v3 v3.0.1 - gotest.tools/v3 v3.1.0 + gotest.tools/v3 v3.2.0 k8s.io/api v0.28.1 k8s.io/apimachinery v0.28.1 k8s.io/client-go v0.28.1 @@ -176,7 +184,9 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.19.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/participle/v2 v2.0.0 // indirect @@ -308,6 +318,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.84.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.84.0 // indirect @@ -317,6 +328,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.84.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.84.0 // 
indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.84.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.84.0 // indirect @@ -373,7 +385,6 @@ require ( go.opentelemetry.io/collector/connector v0.84.1-0.20230908201109-ab3d6c5b6470 // indirect go.opentelemetry.io/collector/extension/auth v0.84.1-0.20230908201109-ab3d6c5b6470 // indirect go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014.0.20230908201109-ab3d6c5b6470 // indirect - go.opentelemetry.io/collector/semconv v0.84.1-0.20230908201109-ab3d6c5b6470 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.43.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect diff --git a/go.sum b/go.sum index 95c130e960..ce6b9ca655 100644 --- a/go.sum +++ b/go.sum @@ -100,6 +100,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.19.1 h1:LyRJCTBJP53P1JURFbhFSRz36gxaBtMAjzjlYupNR7Q= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.19.1/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= @@ -116,6 +118,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.32.0 h1:P+RUjEaRU0GMMbYexGMDyrMkLhbbBVUVISDywi+IlFU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= +github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -140,6 +144,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1483 h1:J8HaD+Zpfi1gcel3HCKpoHHEsrcuRrZlSnx7R9SCf5I= +github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp v0.0.0-20230928170322-0df38c533713 h1:ACWoE8NqyI13oKdpg+074JIta5wmxsL1kOirTzDO5Gk= +github.com/amazon-contributing/opentelemetry-collector-contrib/config/confighttp v0.0.0-20230928170322-0df38c533713/go.mod 
h1:4g9P7MZPReFnNJD0lpavI/LR0vIwlsTJov+hJoKT+nM= github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231102130031-505e23230a50 h1:MFm/DA3NTQQ3LjON7cj3a53VBwCjRFzBy6UCjfkJxKc= github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.0.0-20231102130031-505e23230a50/go.mod h1:/8w8sPrpOeADRJgMsu8o4jOiFX29zCC899+ao7S1GXI= github.com/amazon-contributing/opentelemetry-collector-contrib/exporter/awsemfexporter v0.0.0-20231102130031-505e23230a50 h1:Qzmqql2XCdgK3m65CK41hMAopfFNEbJpDUjs9xX5aSk= @@ -162,6 +168,8 @@ github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0 github.com/amazon-contributing/opentelemetry-collector-contrib/override/aws v0.0.0-20231102130031-505e23230a50/go.mod h1:F5l/VuHtB8418NLJEsHeYz/pni6sWtOMR/SM6mgarhQ= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713 h1:2daWNVtWNvRDoCTN5GG5N+LEM9OuY3RjJ0cboU3+xmM= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/stanza v0.0.0-20230928170322-0df38c533713/go.mod h1:lJLumMdUeKqurOskauSjhH4J2hz8r0iNyQWDl3i5NSM= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20230928170322-0df38c533713 h1:tKGat0aoXPkscaShYYRbnXH14asXqi1Iem4K3nMrNpk= +github.com/amazon-contributing/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.0.0-20230928170322-0df38c533713/go.mod h1:fbCDqcaNUzfvbpI4y91hT8UfV18VyIKfS42BsPRDAuc= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231102130031-505e23230a50 h1:vLAUJwQUtH2OQ9QkkehyxXI//WalYbNgKU2nqb48LR8= github.com/amazon-contributing/opentelemetry-collector-contrib/pkg/translator/prometheus v0.0.0-20231102130031-505e23230a50/go.mod h1:9qsT0AsMflbQKz0ojK3aRU/PbyGQCDPKut3XMfAkW8k= github.com/amazon-contributing/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.0.0-20231102130031-505e23230a50 h1:09XDd1Ta4n7nLlDUBASj6PoO5j/VwkRWQfvrvFNcoW0= @@ -358,6 +366,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= +github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= @@ -966,7 +976,11 @@ github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je4 github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.84.0 h1:WFCStS52a3bMG9nEG6Eo8+EfgtXfGclTN/H4as7jdkE= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.84.0 h1:ysq9+0eESy8Dj7Yp3Ijn6uDAtvR+h+LoTwzZbw78gRc= 
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.84.0/go.mod h1:CwZNGQC/yVWCJrltkkGGGPeUUqtWuLQyADWfmqfol+Q= github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.84.0 h1:elXwC0d93Y4/t5nZmSVVX6EH9GmGgKubNCFbu3sjxEc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.84.0 h1:BWVaklpLBrFOLlW/nqT3o2onEhaMIOr2/LBnEciDVjQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.84.0/go.mod h1:e/k1LzterZSHvxWF3HG9Wo2VtuVrYAt0DE+inTVaD+g= github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.84.0 h1:KwJDgnqegui18ebzCfxHURRstPjY2CfM/yGRugcdOT8= github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.84.0/go.mod h1:WAScm+oitM87OWSy+pPAC6eCzg3xhYz3VBSef2+zV60= github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.84.0 h1:vfZIgsfOkrY+bqh3HGYWDtvxN5SR4B+IYJqY1733j+8= @@ -981,6 +995,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0. github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.84.0/go.mod h1:iL+tGP94Xdes4iUmss/Me8OOqvJhWeBWPcW8OgHQZyo= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.84.0 h1:ezuTl9JDE/v83DnyJk5jLEumkgu0nxdoWXNTKmJ/+KA= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.84.0/go.mod h1:u8PmrJN1vhdnUtyMkxoze478uOzX/bVTeJebvM5xyHU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.84.0 h1:4xV4X+Zze+nNLkyK35LkD1AOL/V7qxgptsJLEWfxhj4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.84.0/go.mod h1:rcxnHaBL7R/VoO8yahgIFL7NZIdFaakl4Jpy2tbNeGc= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.84.0 h1:hCHJbiLdDwsswhfllFCq4fjRWUCz2GAHhzOB1n7jHK4= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.84.0/go.mod h1:0us3rAudWnHES0nOtmTsUjgQtlKJyiozaC5osgJM7cU= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.84.0 h1:NXzx/YViPbapdfI0RZ2RJk4XgKU099Ci9rGBsZ/div8= @@ -1267,8 +1283,6 @@ go.opentelemetry.io/collector/config/configcompression v0.84.1-0.20230908201109- go.opentelemetry.io/collector/config/configcompression v0.84.1-0.20230908201109-ab3d6c5b6470/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= go.opentelemetry.io/collector/config/configgrpc v0.84.0 h1:wWYWbmm8EHm33Xllfe1o+AEmVHbjx4A5bDSx9TfM3bc= go.opentelemetry.io/collector/config/configgrpc v0.84.0/go.mod h1:cs/g9nIJZh/R6G8XoBIO8chtU0RoiaG3DcP454pKenY= -go.opentelemetry.io/collector/config/confighttp v0.84.1-0.20230908201109-ab3d6c5b6470 h1:r93l0jZH/52k6Etzk3s0emCbzZNHZHRkD4AZQVVBXnI= -go.opentelemetry.io/collector/config/confighttp v0.84.1-0.20230908201109-ab3d6c5b6470/go.mod h1:YUrduvwwO7zNGS9V2VrVof2R6liY/TekAyF+tkaRJm4= go.opentelemetry.io/collector/config/confignet v0.84.1-0.20230908201109-ab3d6c5b6470 h1:QOIvMZc5E8KT+1r65KxohRtjjn3hac+YF5nnnGbTJc4= go.opentelemetry.io/collector/config/confignet v0.84.1-0.20230908201109-ab3d6c5b6470/go.mod h1:cpO8JYWGONaViOygKVw+Hd2UoBcn2cUiyi0WWeFTwJY= go.opentelemetry.io/collector/config/configopaque v0.84.1-0.20230908201109-ab3d6c5b6470 h1:t6LXR0S4c3FfXGJ/hbwqdHeX7I4G6TOc83SPqGCkKPE= @@ -1931,8 +1945,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
-gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
+gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I=
+gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/plugins/processors/awsappsignals/README.md b/plugins/processors/awsappsignals/README.md
new file mode 100644
index 0000000000..4906b69a46
--- /dev/null
+++ b/plugins/processors/awsappsignals/README.md
@@ -0,0 +1,130 @@
+# AWS AppSignals Processor for Amazon CloudWatch Agent
+
+The AWS AppSignals processor is used to reduce the cardinality of telemetry metrics and traces before exporting them to CloudWatch Logs via [EMF](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/awsemfexporter) and [X-Ray](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/awsxrayexporter) respectively.
+It reduces the cardinality of metrics/traces via three types of actions, `keep`, `drop` and `replace`, which are configured by users. CloudWatch Agent (CWA) customers configure these rules in their CWA configuration.
+
+Note: Traces support only `replace` actions, and their rules are implicitly pulled from the logs section of the CWA configuration.
+
+| Status                   |                           |
+| ------------------------ |---------------------------|
+| Stability                | [beta]                    |
+| Supported pipeline types | metrics, traces           |
+| Distributions            | [amazon-cloudwatch-agent] |
+
+## Processor Configuration
+
+The following processor configuration parameters are supported.
+
+| Name | Description | Default |
+|:------------|:-----------------------------------------------------------------------------------------------------------------|---------|
+| `resolvers` | The platform the processor is being configured for. Currently supports EKS; EC2 will be supported in the future.  | [eks] |
+| `rules` | Custom rules used for filtering metrics/traces. Each rule is of type `drop`, `keep`, or `replace`. | [] |
+
+### rules
+The rules section defines the rules (filters) to be applied.
+
+| Name | Description | Default |
+|:---------------|:--------------------------------------------------------------------------------------------------------------------|-----|
+| `selectors` | List of metrics/traces dimension matchers. | [] |
+| `action` | Action applied to the matched selectors: `keep`, `drop`, or `replace`. | "" |
+| `rule_name` | (Optional) Name of the rule. | "" |
+| `replacements` | (Optional) List of metrics/traces replacements to be executed for the matched selectors. Requires `action` = `replace`. | [] |
+
+#### selectors
+A selectors section defines matching against the dimensions of incoming metrics/traces.
+
+| Name | Description | Default |
+|:------------|:---------------------------------------------------|------|
+| `dimension` | Dimension of the metrics/traces to match on | "" |
+| `match` | Glob pattern used to match the dimension's value | "" |
+
+### replacements
+A replacements section defines matching against the dimensions of incoming metrics/traces whose values will be replaced. The rule's action must be `replace`.
+
+| Name | Description | Default |
+|:-------------------|:---------------------------------------------------|------|
+| `target_dimension` | Dimension whose value will be replaced | "" |
+| `value` | Value to replace the current dimension value with | "" |
+
+
+## AWS AppSignals Processor Configuration Example
+
+```yaml
+awsappsignals:
+  resolvers: ["eks"]
+  rules:
+    - selectors:
+        - dimension: Operation
+          match: "POST *"
+        - dimension: RemoteService
+          match: "*"
+      action: keep
+      rule_name: "keep01"
+    - selectors:
+        - dimension: Operation
+          match: "GET *"
+        - dimension: RemoteService
+          match: "*"
+      action: keep
+      rule_name: "keep02"
+    - selectors:
+        - dimension: Operation
+          match: "POST *"
+      action: drop
+      rule_name: "drop01"
+    - selectors:
+        - dimension: Operation
+          match: "*"
+      replacements:
+        - target_dimension: RemoteOperation
+          value: "This is a test string"
+      action: replace
+      rule_name: "replace01"
+```
+
+## Amazon CloudWatch Agent Configuration Example
+
+```json
+{
+  "agent": {
+    "region": "us-west-2",
+    "debug": true
+  },
+  "traces": {
+    "traces_collected": {
+      "app_signals": {}
+    }
+  },
+  "logs": {
+    "metrics_collected": {
+      "app_signals": {
+        "rules": [
+          {
+            "selectors": [
+              {
+                "dimension": "Service",
+                "match": "pet-clinic-frontend"
+              },
+              {
+                "dimension": "RemoteService",
+                "match": "customers-service"
+              }
+            ],
+            "action": "keep",
+            "rule_name": "keep01"
+          },
+          {
+            "selectors": [
+              {
+                "dimension": "Operation",
+                "match": "GET *"
+              }
+            ],
+            "action": "drop",
+            "rule_name": "drop01"
+          }
+        ]
+      }
+    }
+  }
+}
+```
\ No newline at end of file
diff --git a/plugins/processors/awsappsignals/config.go b/plugins/processors/awsappsignals/config.go
new file mode 100644
index 0000000000..82f7283c91
--- /dev/null
+++ b/plugins/processors/awsappsignals/config.go
@@ -0,0 +1,18 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+
+package awsappsignals
+
+import (
+	"github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules"
+)
+
+type Config struct {
+	Resolvers []string     `mapstructure:"resolvers"`
+	Rules     []rules.Rule `mapstructure:"rules"`
+}
+
+func (cfg *Config) Validate() error {
+	// TODO: validate mandatory fields (if any) in the config
+	return nil
+}
diff --git a/plugins/processors/awsappsignals/config_test.go b/plugins/processors/awsappsignals/config_test.go
new file mode 100644
index 0000000000..4cb8f47df9
--- /dev/null
+++ b/plugins/processors/awsappsignals/config_test.go
@@ -0,0 +1,104 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
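As a quick orientation to the configuration surface introduced above (an editorial sketch, not part of the diff), the snippet below shows how the README's `keep`/`replace` rules map onto the Go types this patch adds: `Config` from `config.go` and the `rules.Rule`, `rules.Selector`, and `rules.Replacement` types exercised in `config_test.go`. It assumes it is compiled against the `github.com/aws/amazon-cloudwatch-agent` module so the import paths resolve.

```go
package main

import (
	"fmt"

	"github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals"
	"github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules"
)

func main() {
	// Programmatic equivalent of the "keep01" and "replace01" rules from the README example.
	cfg := &awsappsignals.Config{
		Resolvers: []string{"eks"},
		Rules: []rules.Rule{
			{
				Selectors: []rules.Selector{
					{Dimension: "Operation", Match: "POST *"},
					{Dimension: "RemoteService", Match: "*"},
				},
				Action:   "keep",
				RuleName: "keep01",
			},
			{
				Selectors: []rules.Selector{
					{Dimension: "Operation", Match: "*"},
				},
				Replacements: []rules.Replacement{
					{TargetDimension: "RemoteOperation", Value: "This is a test string"},
				},
				Action:   "replace",
				RuleName: "replace01",
			},
		},
	}
	// Validate is currently a no-op (see the TODO in config.go) and returns nil.
	fmt.Println(cfg.Validate())
}
```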
+// SPDX-License-Identifier: MIT + +package awsappsignals + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" +) + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + id component.ID + expected component.Config + errorMessage string + }{ + { + id: component.NewIDWithName("awsappsignals", ""), + expected: &Config{ + Resolvers: []string{"eks"}, + Rules: []rules.Rule{ + { + Selectors: []rules.Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Action: "keep", + RuleName: "keep01", + }, + { + Selectors: []rules.Selector{ + { + Dimension: "RemoteService", + Match: "UnknownRemoteService", + }, + { + Dimension: "RemoteOperation", + Match: "GetShardIterator", + }, + }, + Action: "drop", + }, + { + Selectors: []rules.Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Replacements: []rules.Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "ListPetsByCustomer", + }, + { + TargetDimension: "ResourceTarget", + Value: " ", + }, + }, + Action: "replace", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + + if tt.expected == nil { + assert.EqualError(t, component.ValidateConfig(cfg), tt.errorMessage) + return + } + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg) + }) + } +} diff --git a/plugins/processors/awsappsignals/factory.go b/plugins/processors/awsappsignals/factory.go new file mode 100644 index 0000000000..8cb3bc6298 --- /dev/null +++ b/plugins/processors/awsappsignals/factory.go @@ -0,0 +1,97 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsappsignals + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" +) + +const ( + // The value of "type" key in configuration. + typeStr = "awsappsignals" + // The stability level of the processor. + stability = component.StabilityLevelBeta +) + +var consumerCapabilities = consumer.Capabilities{MutatesData: true} + +// NewFactory returns a new factory for the aws attributes processor. 
+func NewFactory() processor.Factory { + return processor.NewFactory( + typeStr, + createDefaultConfig, + processor.WithTraces(createTracesProcessor, stability), + processor.WithMetrics(createMetricsProcessor, stability), + ) +} + +func createDefaultConfig() component.Config { + return &Config{ + // TODO: change default config when other resolvers are supported + Resolvers: []string{"eks"}, + } +} + +func createTracesProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + next consumer.Traces, +) (processor.Traces, error) { + ap, err := createProcessor(set, cfg) + if err != nil { + return nil, err + } + + return processorhelper.NewTracesProcessor( + ctx, + set, + cfg, + next, + ap.processTraces, + processorhelper.WithCapabilities(consumerCapabilities), + processorhelper.WithStart(ap.Start), + processorhelper.WithShutdown(ap.Shutdown)) +} + +func createMetricsProcessor( + ctx context.Context, + set processor.CreateSettings, + cfg component.Config, + nextMetricsConsumer consumer.Metrics, +) (processor.Metrics, error) { + ap, err := createProcessor(set, cfg) + if err != nil { + return nil, err + } + + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + nextMetricsConsumer, + ap.processMetrics, + processorhelper.WithCapabilities(consumerCapabilities), + processorhelper.WithStart(ap.Start), + processorhelper.WithShutdown(ap.Shutdown)) +} + +func createProcessor( + params processor.CreateSettings, + cfg component.Config, +) (*awsappsignalsprocessor, error) { + pCfg, ok := cfg.(*Config) + if !ok { + return nil, errors.New("could not initialize awsappsignalsprocessor") + } + ap := &awsappsignalsprocessor{logger: params.Logger, config: pCfg} + + return ap, nil +} diff --git a/plugins/processors/awsappsignals/internal/attributes/attributes.go b/plugins/processors/awsappsignals/internal/attributes/attributes.go new file mode 100644 index 0000000000..bc7c1eb3eb --- /dev/null +++ b/plugins/processors/awsappsignals/internal/attributes/attributes.go @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package attributes + +const ( + // aws attributes + AWSLocalService = "aws.local.service" + AWSLocalOperation = "aws.local.operation" + AWSRemoteService = "aws.remote.service" + AWSRemoteOperation = "aws.remote.operation" + AWSRemoteTarget = "aws.remote.target" + AWSHostedInEnvironment = "aws.hostedin.environment" + + // kubernetes resource attributes + K8SDeploymentName = "k8s.deployment.name" + K8SStatefulSetName = "k8s.statefulset.name" + K8SDaemonSetName = "k8s.daemonset.name" + K8SJobName = "k8s.job.name" + K8SCronJobName = "k8s.cronjob.name" + K8SPodName = "k8s.pod.name" + K8SRemoteNamespace = "K8s.RemoteNamespace" + + // hosted in attribute names + HostedInClusterName = "HostedIn.EKS.Cluster" + HostedInK8SNamespace = "HostedIn.K8s.Namespace" + HostedInEnvironment = "HostedIn.Environment" +) diff --git a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go new file mode 100644 index 0000000000..25a6af641e --- /dev/null +++ b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer.go @@ -0,0 +1,109 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT
+
+package normalizer
+
+import (
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.uber.org/zap"
+
+	attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes"
+)
+
+type attributesNormalizer struct {
+	logger *zap.Logger
+}
+
+var renameMapForMetric = map[string]string{
+	attr.AWSLocalService:    "Service",
+	attr.AWSLocalOperation:  "Operation",
+	attr.AWSRemoteService:   "RemoteService",
+	attr.AWSRemoteOperation: "RemoteOperation",
+	attr.AWSRemoteTarget:    "RemoteTarget",
+}
+
+var renameMapForTrace = map[string]string{
+	// these kubernetes resource attributes are set by the OpenTelemetry operator
+	// see the code references from upstream:
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43
+	attr.K8SDeploymentName:  "K8s.Workload",
+	attr.K8SStatefulSetName: "K8s.Workload",
+	attr.K8SDaemonSetName:   "K8s.Workload",
+	attr.K8SJobName:         "K8s.Workload",
+	attr.K8SCronJobName:     "K8s.Workload",
+	attr.K8SPodName:         "K8s.Pod",
+}
+
+var copyMapForMetric = map[string]string{
+	// these kubernetes resource attributes are set by the OpenTelemetry operator
+	// see the code references from upstream:
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L245
+	// * https://github.com/open-telemetry/opentelemetry-operator/blob/0e39ee77693146e0924da3ca474a0fe14dc30b3a/pkg/instrumentation/sdk.go#L305C43-L305C43
+	attr.K8SDeploymentName:  "K8s.Workload",
+	attr.K8SStatefulSetName: "K8s.Workload",
+	attr.K8SDaemonSetName:   "K8s.Workload",
+	attr.K8SJobName:         "K8s.Workload",
+	attr.K8SCronJobName:     "K8s.Workload",
+	attr.K8SPodName:         "K8s.Pod",
+}
+
+func NewAttributesNormalizer(logger *zap.Logger) *attributesNormalizer {
+	return &attributesNormalizer{
+		logger: logger,
+	}
+}
+
+func (n *attributesNormalizer) Process(attributes, resourceAttributes pcommon.Map, isTrace bool) error {
+	n.copyResourceAttributesToAttributes(attributes, resourceAttributes, isTrace)
+	n.renameAttributes(attributes, resourceAttributes, isTrace)
+	return nil
+}
+
+func (n *attributesNormalizer) renameAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) {
+	attrs := attributes
+	renameMap := renameMapForMetric
+	if isTrace {
+		attrs = resourceAttributes
+		renameMap = renameMapForTrace
+	}
+
+	rename(attrs, renameMap)
+}
+
+func (n *attributesNormalizer) copyResourceAttributesToAttributes(attributes, resourceAttributes pcommon.Map, isTrace bool) {
+	if isTrace {
+		return
+	}
+	for k, v := range copyMapForMetric {
+		if resourceAttrValue, ok := resourceAttributes.Get(k); ok {
+			// print some debug info when an attribute value is overwritten
+			if originalAttrValue, ok := attributes.Get(k); ok {
+				n.logger.Debug("attribute value is overwritten", zap.String("attribute", k), zap.String("original", originalAttrValue.AsString()), zap.String("new", resourceAttrValue.AsString()))
+			}
+			attributes.PutStr(v, resourceAttrValue.AsString())
+			if k == attr.K8SPodName {
+				// only copy "host.id" from resource attributes to "K8s.Node" in attributes if the pod name is set
+				if host, ok := resourceAttributes.Get("host.id"); ok {
+					attributes.PutStr("K8s.Node", host.AsString())
+				}
+			}
+		}
+	}
+}
+
+func rename(attrs pcommon.Map, renameMap 
map[string]string) { + for original, replacement := range renameMap { + if value, ok := attrs.Get(original); ok { + attrs.PutStr(replacement, value.AsString()) + attrs.Remove(original) + if original == attr.K8SPodName { + // only rename host.id if the pod name is set + if host, ok := attrs.Get("host.id"); ok { + attrs.PutStr("K8s.Node", host.AsString()) + attrs.Remove("host.id") + } + } + } + } +} diff --git a/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go new file mode 100644 index 0000000000..4ca77b3f61 --- /dev/null +++ b/plugins/processors/awsappsignals/internal/normalizer/attributesnormalizer_test.go @@ -0,0 +1,105 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package normalizer + +import ( + "testing" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap" +) + +func TestRenameAttributes_for_metric(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // test for metric + // Create a pcommon.Map with some attributes + attributes := pcommon.NewMap() + for originalKey, replacementKey := range renameMapForMetric { + attributes.PutStr(originalKey, replacementKey+"-value") + } + + resourceAttributes := pcommon.NewMap() + // Call the process method + normalizer.renameAttributes(attributes, resourceAttributes, false) + + // Check that the original key has been removed + for originalKey := range renameMapForMetric { + if _, ok := attributes.Get(originalKey); ok { + t.Errorf("originalKey was not removed") + } + } + + // Check that the new key has the correct value + for _, replacementKey := range renameMapForMetric { + if value, ok := attributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") + } + } +} + +func TestRenameAttributes_for_trace(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // test for trace + // Create a pcommon.Map with some attributes + resourceAttributes := pcommon.NewMap() + for originalKey, replacementKey := range renameMapForTrace { + resourceAttributes.PutStr(originalKey, replacementKey+"-value") + } + resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") + + attributes := pcommon.NewMap() + // Call the process method + normalizer.renameAttributes(attributes, resourceAttributes, true) + + // Check that the original key has been removed + for originalKey := range renameMapForTrace { + if _, ok := resourceAttributes.Get(originalKey); ok { + t.Errorf("originalKey was not removed") + } + } + + // Check that the new key has the correct value + for _, replacementKey := range renameMapForTrace { + if value, ok := resourceAttributes.Get(replacementKey); !ok || value.AsString() != replacementKey+"-value" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), replacementKey+"-value") + } + } + + if value, ok := resourceAttributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { + t.Errorf("replacementKey has incorrect value: got %v, want %v", value.AsString(), "i-01ef7d37f42caa168") + } +} + +func TestCopyResourceAttributesToAttributes(t *testing.T) { + logger, _ := zap.NewDevelopment() + normalizer := NewAttributesNormalizer(logger) + + // Create a pcommon.Map for resourceAttributes with some 
attributes + resourceAttributes := pcommon.NewMap() + for resourceAttrKey, attrKey := range copyMapForMetric { + resourceAttributes.PutStr(resourceAttrKey, attrKey+"-value") + } + resourceAttributes.PutStr("host.id", "i-01ef7d37f42caa168") + + // Create a pcommon.Map for attributes + attributes := pcommon.NewMap() + + // Call the process method + normalizer.copyResourceAttributesToAttributes(attributes, resourceAttributes, false) + + // Check that the attribute has been copied correctly + for _, attrKey := range copyMapForMetric { + if value, ok := attributes.Get(attrKey); !ok || value.AsString() != attrKey+"-value" { + t.Errorf("Attribute was not copied correctly: got %v, want %v", value.AsString(), attrKey+"-value") + } + } + + if value, ok := attributes.Get("K8s.Node"); !ok || value.AsString() != "i-01ef7d37f42caa168" { + t.Errorf("Attribute was not copied correctly: got %v, want %v", value.AsString(), "i-01ef7d37f42caa168") + } +} diff --git a/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go b/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go new file mode 100644 index 0000000000..724c43f065 --- /dev/null +++ b/plugins/processors/awsappsignals/internal/resolver/attributesresolver.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap" + + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" +) + +var DefaultHostedInAttributes = map[string]string{ + attr.AWSHostedInEnvironment: attr.HostedInEnvironment, +} + +type subResolver interface { + Process(attributes, resourceAttributes pcommon.Map) error + Stop(ctx context.Context) error +} + +type attributesResolver struct { + subResolvers []subResolver +} + +// create a new attributes resolver +func NewAttributesResolver(resolverNames []string, logger *zap.Logger) *attributesResolver { + subResolvers := []subResolver{} + for _, resolverName := range resolverNames { + if resolverName == "eks" { + subResolvers = append(subResolvers, getEksResolver(logger), newEKSHostedInAttributeResolver()) + } else { + subResolvers = append(subResolvers, newHostedInAttributeResolver(DefaultHostedInAttributes)) + } + } + return &attributesResolver{ + subResolvers: subResolvers, + } +} + +// Process the attributes +func (r *attributesResolver) Process(attributes, resourceAttributes pcommon.Map, _ bool) error { + for _, subResolver := range r.subResolvers { + if err := subResolver.Process(attributes, resourceAttributes); err != nil { + return err + } + } + return nil +} + +func (r *attributesResolver) Stop(ctx context.Context) error { + var errs error + for _, subResolver := range r.subResolvers { + if err := subResolver.Stop(ctx); err != nil { + errs = errors.Join(errs, err) + } + } + return errs +} + +type hostedInAttributeResolver struct { + attributeMap map[string]string +} + +func newHostedInAttributeResolver(attributeMap map[string]string) *hostedInAttributeResolver { + return &hostedInAttributeResolver{ + attributeMap: attributeMap, + } +} +func (h *hostedInAttributeResolver) Process(attributes, resourceAttributes pcommon.Map) error { + for attrKey, mappingKey := range h.attributeMap { + if val, ok := resourceAttributes.Get(attrKey); ok { + attributes.PutStr(mappingKey, val.AsString()) + } + } + + if _, ok := resourceAttributes.Get(attr.AWSHostedInEnvironment); !ok { + 
hostedInEnv := "Generic" + attributes.PutStr(attr.HostedInEnvironment, hostedInEnv) + } + + return nil +} + +func (h *hostedInAttributeResolver) Stop(ctx context.Context) error { + return nil +} diff --git a/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go b/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go new file mode 100644 index 0000000000..2597c4926f --- /dev/null +++ b/plugins/processors/awsappsignals/internal/resolver/attributesresolver_test.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type MockSubResolver struct { + mock.Mock +} + +func (m *MockSubResolver) Process(attributes, resourceAttributes pcommon.Map) error { + args := m.Called(attributes, resourceAttributes) + return args.Error(0) +} + +func (m *MockSubResolver) Stop(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func TestAttributesResolver_Process(t *testing.T) { + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + + mockSubResolver1 := new(MockSubResolver) + mockSubResolver1.On("Process", attributes, resourceAttributes).Return(nil) + + mockSubResolver2 := new(MockSubResolver) + mockSubResolver2.On("Process", attributes, resourceAttributes).Return(errors.New("error")) + + r := &attributesResolver{ + subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, + } + + err := r.Process(attributes, resourceAttributes, true) + assert.Error(t, err) + mockSubResolver1.AssertExpectations(t) + mockSubResolver2.AssertExpectations(t) +} + +func TestAttributesResolver_Stop(t *testing.T) { + ctx := context.Background() + + mockSubResolver1 := new(MockSubResolver) + mockSubResolver1.On("Stop", ctx).Return(nil) + + mockSubResolver2 := new(MockSubResolver) + mockSubResolver2.On("Stop", ctx).Return(errors.New("error")) + + r := &attributesResolver{ + subResolvers: []subResolver{mockSubResolver1, mockSubResolver2}, + } + + err := r.Stop(ctx) + assert.Error(t, err) + mockSubResolver1.AssertExpectations(t) + mockSubResolver2.AssertExpectations(t) +} diff --git a/plugins/processors/awsappsignals/internal/resolver/eks.go b/plugins/processors/awsappsignals/internal/resolver/eks.go new file mode 100644 index 0000000000..5c2dfb65b6 --- /dev/null +++ b/plugins/processors/awsappsignals/internal/resolver/eks.go @@ -0,0 +1,702 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "errors" + "fmt" + "math/rand" + "net" + "regexp" + "strconv" + "strings" + "sync" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "go.opentelemetry.io/collector/pdata/pcommon" + semconv "go.opentelemetry.io/collector/semconv/v1.17.0" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" +) + +const ( + // kubeAllowedStringAlphaNums holds the characters allowed in replicaset names from as parent deployment + // https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L121 + kubeAllowedStringAlphaNums = "bcdfghjklmnpqrstvwxz2456789" + + // Deletion delay adjustment: + // Previously, EKS resolver would instantly remove the IP to Service mapping when a pod was destroyed. + // This posed a problem because: + // 1. Metric data is aggregated and emitted every 1 minute. + // 2. If this aggregated metric data, which contains the IP of the now-destroyed pod, arrives + // at the EKS resolver after the IP records have already been deleted, the metric can't be processed correctly. + // + // To mitigate this issue, we've introduced a 2-minute deletion delay. This ensures that any + // metric data that arrives within those 2 minutes, containing the old IP, will still get mapped correctly to a service. + deletionDelay = 2 * time.Minute + + jitterKubernetesAPISeconds = 10 +) + +var DefaultHostedInAttributeMap = map[string]string{ + semconv.AttributeK8SNamespaceName: attr.HostedInK8SNamespace, +} + +var ( + // ReplicaSet name = Deployment name + "-" + up to 10 alphanumeric characters string, if the ReplicaSet was created through a deployment + // The suffix string of the ReplicaSet name is an int32 number (0 to 4,294,967,295) that is cast to a string and then + // mapped to an alphanumeric value with only the following characters allowed: "bcdfghjklmnpqrstvwxz2456789". + // The suffix string length is therefore nondeterministic. The regex accepts a suffix of length 6-10 to account for + // ReplicaSets not managed by deployments that may have similar names. 
+ // Suffix Generation: https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/controller_utils.go#L1201 + // Alphanumeric Mapping: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L121) + replicaSetWithDeploymentNamePattern = fmt.Sprintf(`^(.+)-[%s]{6,10}$`, kubeAllowedStringAlphaNums) + deploymentFromReplicaSetPattern = regexp.MustCompile(replicaSetWithDeploymentNamePattern) + // if a pod is launched directly by a replicaSet (with a given name by users), its name has the following pattern: + // Pod name = ReplicaSet name + 5 alphanumeric characters long string + podWithReplicaSetNamePattern = fmt.Sprintf(`^(.+)-[%s]{5}$`, kubeAllowedStringAlphaNums) + replicaSetFromPodPattern = regexp.MustCompile(podWithReplicaSetNamePattern) +) + +type eksResolver struct { + logger *zap.Logger + clientset kubernetes.Interface + ipToPod *sync.Map + podToWorkloadAndNamespace *sync.Map + ipToServiceAndNamespace *sync.Map + serviceAndNamespaceToSelectors *sync.Map + workloadAndNamespaceToLabels *sync.Map + serviceToWorkload *sync.Map // computed from serviceAndNamespaceToSelectors and workloadAndNamespaceToLabels every 1 min + workloadPodCount map[string]int + safeStopCh *safeChannel // trace and metric processors share the same eksResolver and might close the same channel separately +} + +// a safe channel which can be closed multiple times +type safeChannel struct { + sync.Mutex + + ch chan struct{} + closed bool +} + +func (sc *safeChannel) Close() { + sc.Lock() + defer sc.Unlock() + + if !sc.closed { + close(sc.ch) + sc.closed = true + } +} + +var ( + once sync.Once + instance *eksResolver +) + +func jitterSleep(seconds int) { + jitter := time.Duration(rand.Intn(seconds)) * time.Second // nolint:gosec + time.Sleep(jitter) +} + +func attachNamespace(resourceName, namespace string) string { + // character "@" is not allowed in kubernetes resource names: https://unofficial-kubernetes.readthedocs.io/en/latest/concepts/overview/working-with-objects/names/ + return resourceName + "@" + namespace +} + +func getServiceAndNamespace(service *corev1.Service) string { + return attachNamespace(service.Name, service.Namespace) +} + +func extractResourceAndNamespace(serviceOrWorkloadAndNamespace string) (string, string) { + // extract service name and namespace from serviceAndNamespace + parts := strings.Split(serviceOrWorkloadAndNamespace, "@") + if len(parts) != 2 { + return "", "" + } + return parts[0], parts[1] +} + +func extractWorkloadNameFromRS(replicaSetName string) (string, error) { + match := deploymentFromReplicaSetPattern.FindStringSubmatch(replicaSetName) + if match != nil { + return match[1], nil + } + + return "", errors.New("failed to extract workload name from replicatSet name: " + replicaSetName) +} + +func extractWorkloadNameFromPodName(podName string) (string, error) { + match := replicaSetFromPodPattern.FindStringSubmatch(podName) + if match != nil { + return match[1], nil + } + + return "", errors.New("failed to extract workload name from pod name: " + podName) +} + +func getWorkloadAndNamespace(pod *corev1.Pod) string { + var workloadAndNamespace string + if pod.ObjectMeta.OwnerReferences != nil { + for _, ownerRef := range pod.ObjectMeta.OwnerReferences { + if workloadAndNamespace != "" { + break + } + + if ownerRef.Kind == "ReplicaSet" { + if workloadName, err := extractWorkloadNameFromRS(ownerRef.Name); err == nil { + // when the replicaSet is created by a deployment, use deployment name + workloadAndNamespace = 
attachNamespace(workloadName, pod.Namespace) + } else if workloadName, err := extractWorkloadNameFromPodName(pod.Name); err == nil { + // when the replicaSet is not created by a deployment, use replicaSet name directly + workloadAndNamespace = attachNamespace(workloadName, pod.Namespace) + } + } else if ownerRef.Kind == "StatefulSet" { + workloadAndNamespace = attachNamespace(ownerRef.Name, pod.Namespace) + } else if ownerRef.Kind == "DaemonSet" { + workloadAndNamespace = attachNamespace(ownerRef.Name, pod.Namespace) + } + } + } + + return workloadAndNamespace +} + +// Deleter represents a type that can delete a key from a map after a certain delay. +type Deleter interface { + DeleteWithDelay(m *sync.Map, key interface{}) +} + +// TimedDeleter deletes a key after a specified delay. +type TimedDeleter struct { + Delay time.Duration +} + +func (td *TimedDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { + go func() { + time.Sleep(td.Delay) + m.Delete(key) + }() +} + +func onAddOrUpdateService(obj interface{}, ipToServiceAndNamespace, serviceAndNamespaceToSelectors *sync.Map) { + service := obj.(*corev1.Service) + // service can also have an external IP (or ingress IP) that could be accessed + // this field can be either an IP address (in some edge case) or a hostname (see "EXTERNAL-IP" column in "k get svc" output) + // [ec2-user@ip-172-31-11-104 one-step]$ k get svc -A + // NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + // default pet-clinic-frontend ClusterIP 10.100.216.182 8080/TCP 108m + // default vets-service ClusterIP 10.100.62.167 8083/TCP 108m + // default visits-service ClusterIP 10.100.96.5 8082/TCP 108m + // ingress-nginx default-http-backend ClusterIP 10.100.11.231 80/TCP 108m + // ingress-nginx ingress-nginx LoadBalancer 10.100.154.5 aex7997ece08c435dbd2b912fd5aa5bd-5372117830.xxxxx.elb.amazonaws.com 80:32080/TCP,443:32081/TCP,9113:30410/TCP 108m + // kube-system kube-dns ClusterIP 10.100.0.10 + // + // we ignore such case for now and may need to consider it in the future + if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != "None" { + ipToServiceAndNamespace.Store(service.Spec.ClusterIP, getServiceAndNamespace(service)) + } + labelSet := mapset.NewSet[string]() + for key, value := range service.Spec.Selector { + labelSet.Add(key + "=" + value) + } + if labelSet.Cardinality() > 0 { + serviceAndNamespaceToSelectors.Store(getServiceAndNamespace(service), labelSet) + } +} + +func onDeleteService(obj interface{}, ipToServiceAndNamespace, serviceAndNamespaceToSelectors *sync.Map, deleter Deleter) { + service := obj.(*corev1.Service) + if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != "None" { + deleter.DeleteWithDelay(ipToServiceAndNamespace, service.Spec.ClusterIP) + } + deleter.DeleteWithDelay(serviceAndNamespaceToSelectors, getServiceAndNamespace(service)) +} + +func removeHostNetworkRecords(pod *corev1.Pod, ipToPod *sync.Map, deleter Deleter) { + for _, port := range getHostNetworkPorts(pod) { + deleter.DeleteWithDelay(ipToPod, pod.Status.HostIP+":"+port) + } +} + +func updateHostNetworkRecords(newPod *corev1.Pod, oldPod *corev1.Pod, ipToPod *sync.Map, deleter Deleter) { + newHostIPPorts := make(map[string]bool) + oldHostIPPorts := make(map[string]bool) + + for _, port := range getHostNetworkPorts(newPod) { + newHostIPPorts[newPod.Status.HostIP+":"+port] = true + } + + for _, port := range getHostNetworkPorts(oldPod) { + oldHostIPPorts[oldPod.Status.HostIP+":"+port] = true + } + + for oldHostIPPort := range oldHostIPPorts { + if _, exist := 
newHostIPPorts[oldHostIPPort]; !exist { + deleter.DeleteWithDelay(ipToPod, oldHostIPPort) + } + } + + for newHostIPPort := range newHostIPPorts { + if _, exist := oldHostIPPorts[newHostIPPort]; !exist { + ipToPod.Store(newHostIPPort, newPod.Name) + } + } +} + +func handlePodAdd(pod *corev1.Pod, ipToPod *sync.Map) { + if pod.Spec.HostNetwork { + for _, port := range getHostNetworkPorts(pod) { + ipToPod.Store(pod.Status.HostIP+":"+port, pod.Name) + } + } else if pod.Status.PodIP != "" { + ipToPod.Store(pod.Status.PodIP, pod.Name) + } +} + +func handlePodUpdate(newPod *corev1.Pod, oldPod *corev1.Pod, ipToPod *sync.Map, deleter Deleter) { + if oldPod.Spec.HostNetwork && newPod.Spec.HostNetwork { + // Case 1: Both oldPod and newPod are using host network + // Here we need to update the host network records accordingly + updateHostNetworkRecords(newPod, oldPod, ipToPod, deleter) + } else if oldPod.Spec.HostNetwork && !newPod.Spec.HostNetwork { + // Case 2: The oldPod was using the host network, but the newPod is not + // Here we remove the old host network records and add new PodIP record if it is not empty + removeHostNetworkRecords(oldPod, ipToPod, deleter) + if newPod.Status.PodIP != "" { + ipToPod.Store(newPod.Status.PodIP, newPod.Name) + } + } else if !oldPod.Spec.HostNetwork && newPod.Spec.HostNetwork { + // Case 3: The oldPod was not using the host network, but the newPod is + // Here we remove the old PodIP record and add new host network records + if oldPod.Status.PodIP != "" { + deleter.DeleteWithDelay(ipToPod, oldPod.Status.PodIP) + } + for _, port := range getHostNetworkPorts(newPod) { + ipToPod.Store(newPod.Status.HostIP+":"+port, newPod.Name) + } + } else if !oldPod.Spec.HostNetwork && !newPod.Spec.HostNetwork && oldPod.Status.PodIP != newPod.Status.PodIP { + // Case 4: Both oldPod and newPod are not using the host network, but the Pod IPs are different + // Here we replace the old PodIP record with the new one + if oldPod.Status.PodIP != "" { + deleter.DeleteWithDelay(ipToPod, oldPod.Status.PodIP) + } + if newPod.Status.PodIP != "" { + ipToPod.Store(newPod.Status.PodIP, newPod.Name) + } + } +} + +func onAddOrUpdatePod(newObj, oldObj interface{}, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels *sync.Map, workloadPodCount map[string]int, isAdd bool, logger *zap.Logger, deleter Deleter) { + pod := newObj.(*corev1.Pod) + + if isAdd { + handlePodAdd(pod, ipToPod) + } else { + oldPod := oldObj.(*corev1.Pod) + handlePodUpdate(pod, oldPod, ipToPod, deleter) + } + + workloadAndNamespace := getWorkloadAndNamespace(pod) + + if workloadAndNamespace != "" { + podToWorkloadAndNamespace.Store(pod.Name, workloadAndNamespace) + podLabels := mapset.NewSet[string]() + for key, value := range pod.ObjectMeta.Labels { + podLabels.Add(key + "=" + value) + } + if podLabels.Cardinality() > 0 { + workloadAndNamespaceToLabels.Store(workloadAndNamespace, podLabels) + } + if isAdd { + workloadPodCount[workloadAndNamespace]++ + logger.Debug("Added pod", zap.String("pod", pod.Name), zap.String("workload", workloadAndNamespace), zap.Int("count", workloadPodCount[workloadAndNamespace])) + } + } +} + +func onDeletePod(obj interface{}, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels *sync.Map, workloadPodCount map[string]int, logger *zap.Logger, deleter Deleter) { + pod := obj.(*corev1.Pod) + if pod.Status.PodIP != "" { + deleter.DeleteWithDelay(ipToPod, pod.Status.PodIP) + } else if pod.Status.HostIP != "" { + for _, port := range getHostNetworkPorts(pod) { + 
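+		// host-network pods are stored in ipToPod keyed by "hostIP:port", one entry per exposed host port,
+		// so each of those entries is removed when the pod goes away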
deleter.DeleteWithDelay(ipToPod, pod.Status.HostIP+":"+port) + } + } + + if workloadKey, ok := podToWorkloadAndNamespace.Load(pod.Name); ok { + workloadAndNamespace := workloadKey.(string) + workloadPodCount[workloadAndNamespace]-- + logger.Debug("workload pod count", zap.String("workload", workloadAndNamespace), zap.Int("podCount", workloadPodCount[workloadAndNamespace])) + if workloadPodCount[workloadAndNamespace] == 0 { + deleter.DeleteWithDelay(workloadAndNamespaceToLabels, workloadAndNamespace) + } + } + deleter.DeleteWithDelay(podToWorkloadAndNamespace, pod.Name) +} + +type PodWatcher struct { + ipToPod *sync.Map + podToWorkloadAndNamespace *sync.Map + workloadAndNamespaceToLabels *sync.Map + workloadPodCount map[string]int + logger *zap.Logger + informer cache.SharedIndexInformer + deleter Deleter +} + +func NewPodWatcher(logger *zap.Logger, informer cache.SharedIndexInformer, deleter Deleter) *PodWatcher { + return &PodWatcher{ + ipToPod: &sync.Map{}, + podToWorkloadAndNamespace: &sync.Map{}, + workloadAndNamespaceToLabels: &sync.Map{}, + workloadPodCount: make(map[string]int), + logger: logger, + informer: informer, + deleter: deleter, + } +} + +func (p *PodWatcher) Run(stopCh chan struct{}) { + p.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + p.logger.Debug("list and watch for pods: ADD") + onAddOrUpdatePod(obj, nil, p.ipToPod, p.podToWorkloadAndNamespace, p.workloadAndNamespaceToLabels, p.workloadPodCount, true, p.logger, p.deleter) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + p.logger.Debug("list and watch for pods: UPDATE") + onAddOrUpdatePod(newObj, oldObj, p.ipToPod, p.podToWorkloadAndNamespace, p.workloadAndNamespaceToLabels, p.workloadPodCount, false, p.logger, p.deleter) + }, + DeleteFunc: func(obj interface{}) { + p.logger.Debug("list and watch for pods: DELETE") + onDeletePod(obj, p.ipToPod, p.podToWorkloadAndNamespace, p.workloadAndNamespaceToLabels, p.workloadPodCount, p.logger, p.deleter) + }, + }) + + go p.informer.Run(stopCh) + +} + +func (p *PodWatcher) WaitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("podWatcher", stopCh, p.informer.HasSynced) { + p.logger.Fatal("timed out waiting for kubernetes pod watcher caches to sync") + } + + p.logger.Info("PodWatcher: Cache synced") +} + +type ServiceWatcher struct { + ipToServiceAndNamespace *sync.Map + serviceAndNamespaceToSelectors *sync.Map + logger *zap.Logger + informer cache.SharedIndexInformer + deleter Deleter +} + +func NewServiceWatcher(logger *zap.Logger, informer cache.SharedIndexInformer, deleter Deleter) *ServiceWatcher { + return &ServiceWatcher{ + ipToServiceAndNamespace: &sync.Map{}, + serviceAndNamespaceToSelectors: &sync.Map{}, + logger: logger, + informer: informer, + deleter: deleter, + } +} + +func (s *ServiceWatcher) Run(stopCh chan struct{}) { + s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + s.logger.Debug("list and watch for services: ADD") + onAddOrUpdateService(obj, s.ipToServiceAndNamespace, s.serviceAndNamespaceToSelectors) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + s.logger.Debug("list and watch for services: UPDATE") + onAddOrUpdateService(newObj, s.ipToServiceAndNamespace, s.serviceAndNamespaceToSelectors) + }, + DeleteFunc: func(obj interface{}) { + s.logger.Debug("list and watch for services: DELETE") + onDeleteService(obj, s.ipToServiceAndNamespace, s.serviceAndNamespaceToSelectors, s.deleter) + }, + }) + go s.informer.Run(stopCh) +} + +func 
(s *ServiceWatcher) WaitForCacheSync(stopCh chan struct{}) { + if !cache.WaitForNamedCacheSync("serviceWatcher", stopCh, s.informer.HasSynced) { + s.logger.Fatal("timed out waiting for kubernetes service watcher caches to sync") + } + + s.logger.Info("ServiceWatcher: Cache synced") +} + +type ServiceToWorkloadMapper struct { + serviceAndNamespaceToSelectors *sync.Map + workloadAndNamespaceToLabels *sync.Map + serviceToWorkload *sync.Map + logger *zap.Logger + deleter Deleter +} + +func NewServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload *sync.Map, logger *zap.Logger, deleter Deleter) *ServiceToWorkloadMapper { + return &ServiceToWorkloadMapper{ + serviceAndNamespaceToSelectors: serviceAndNamespaceToSelectors, + workloadAndNamespaceToLabels: workloadAndNamespaceToLabels, + serviceToWorkload: serviceToWorkload, + logger: logger, + deleter: deleter, + } +} + +func (m *ServiceToWorkloadMapper) MapServiceToWorkload() { + m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) + + m.serviceAndNamespaceToSelectors.Range(func(key, value interface{}) bool { + var workloads []string + serviceAndNamespace := key.(string) + _, serviceNamespace := extractResourceAndNamespace(serviceAndNamespace) + serviceLabels := value.(mapset.Set[string]) + + m.workloadAndNamespaceToLabels.Range(func(workloadKey, labelsValue interface{}) bool { + labels := labelsValue.(mapset.Set[string]) + workloadAndNamespace := workloadKey.(string) + _, workloadNamespace := extractResourceAndNamespace(workloadAndNamespace) + if workloadNamespace == serviceNamespace && workloadNamespace != "" && serviceLabels.IsSubset(labels) { + m.logger.Debug("Found workload for service", zap.String("service", serviceAndNamespace), zap.String("workload", workloadAndNamespace)) + workloads = append(workloads, workloadAndNamespace) + } + + return true + }) + + if len(workloads) > 1 { + m.logger.Info("Multiple workloads found for service. 
You will get unexpected results.", zap.String("service", serviceAndNamespace), zap.Strings("workloads", workloads)) + } else if len(workloads) == 1 { + m.serviceToWorkload.Store(serviceAndNamespace, workloads[0]) + } else { + m.logger.Debug("No workload found for service", zap.String("service", serviceAndNamespace)) + m.deleter.DeleteWithDelay(m.serviceToWorkload, serviceAndNamespace) + } + return true + }) +} + +func (m *ServiceToWorkloadMapper) Start(stopCh chan struct{}) { + // do the first mapping immediately + m.MapServiceToWorkload() + m.logger.Debug("First-time map service to workload at:", zap.Time("time", time.Now())) + + go func() { + for { + select { + case <-stopCh: + return + case <-time.After(time.Minute + 30*time.Second): + m.MapServiceToWorkload() + m.logger.Debug("Map service to workload at:", zap.Time("time", time.Now())) + } + } + }() +} + +func getEksResolver(logger *zap.Logger) subResolver { + once.Do(func() { + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + logger.Fatal("Failed to create config", zap.Error(err)) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + logger.Fatal("Failed to create eks client", zap.Error(err)) + } + + // jitter calls to the kubernetes api + jitterSleep(jitterKubernetesAPISeconds) + + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) + podInformer := sharedInformerFactory.Core().V1().Pods().Informer() + serviceInformer := sharedInformerFactory.Core().V1().Services().Informer() + + timedDeleter := &TimedDeleter{Delay: deletionDelay} + podWatcher := NewPodWatcher(logger, podInformer, timedDeleter) + serviceWatcher := NewServiceWatcher(logger, serviceInformer, timedDeleter) + + safeStopCh := &safeChannel{ch: make(chan struct{}), closed: false} + // initialize the pod and service watchers for the cluster + podWatcher.Run(safeStopCh.ch) + serviceWatcher.Run(safeStopCh.ch) + // wait for caches to sync (for once) so that clients knows about the pods and services in the cluster + podWatcher.WaitForCacheSync(safeStopCh.ch) + serviceWatcher.WaitForCacheSync(safeStopCh.ch) + + serviceToWorkload := &sync.Map{} + serviceToWorkloadMapper := NewServiceToWorkloadMapper(serviceWatcher.serviceAndNamespaceToSelectors, podWatcher.workloadAndNamespaceToLabels, serviceToWorkload, logger, timedDeleter) + serviceToWorkloadMapper.Start(safeStopCh.ch) + + instance = &eksResolver{ + logger: logger, + clientset: clientset, + ipToServiceAndNamespace: serviceWatcher.ipToServiceAndNamespace, + serviceAndNamespaceToSelectors: serviceWatcher.serviceAndNamespaceToSelectors, + ipToPod: podWatcher.ipToPod, + podToWorkloadAndNamespace: podWatcher.podToWorkloadAndNamespace, + workloadAndNamespaceToLabels: podWatcher.workloadAndNamespaceToLabels, + serviceToWorkload: serviceToWorkload, + workloadPodCount: podWatcher.workloadPodCount, + safeStopCh: safeStopCh, + } + }) + + return instance +} + +func (e *eksResolver) Stop(_ context.Context) error { + e.safeStopCh.Close() + return nil +} + +// add a method to eksResolver +func (e *eksResolver) GetWorkloadAndNamespaceByIP(ip string) (string, string, error) { + var workload, namespace string + if podKey, ok := e.ipToPod.Load(ip); ok { + pod := podKey.(string) + if workloadKey, ok := e.podToWorkloadAndNamespace.Load(pod); ok { + workload, namespace = extractResourceAndNamespace(workloadKey.(string)) + return workload, namespace, nil + } + } + + if serviceKey, ok := e.ipToServiceAndNamespace.Load(ip); ok { + serviceAndNamespace := serviceKey.(string) + if 
workloadKey, ok := e.serviceToWorkload.Load(serviceAndNamespace); ok { + workload, namespace = extractResourceAndNamespace(workloadKey.(string)) + return workload, namespace, nil + } + } + + return "", "", errors.New("no EKS workload found for ip: " + ip) +} + +func (e *eksResolver) Process(attributes, resourceAttributes pcommon.Map) error { + if value, ok := attributes.Get(attr.AWSRemoteService); ok { + valueStr := value.AsString() + ipStr := "" + if ip, _, ok := extractIPPort(valueStr); ok { + if workload, namespace, err := e.GetWorkloadAndNamespaceByIP(valueStr); err == nil { + attributes.PutStr(attr.AWSRemoteService, workload) + attributes.PutStr(attr.K8SRemoteNamespace, namespace) + } else { + ipStr = ip + } + } else if isIP(valueStr) { + ipStr = valueStr + } + + if ipStr != "" { + if workload, namespace, err := e.GetWorkloadAndNamespaceByIP(ipStr); err == nil { + attributes.PutStr(attr.AWSRemoteService, workload) + attributes.PutStr(attr.K8SRemoteNamespace, namespace) + } else { + e.logger.Debug("failed to Process ip", zap.String("ip", ipStr), zap.Error(err)) + attributes.PutStr(attr.AWSRemoteService, "UnknownRemoteService") + } + } + } + + return nil +} + +func isIP(ipString string) bool { + ip := net.ParseIP(ipString) + return ip != nil +} + +const IP_PORT_PATTERN = `^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d+)$` + +var ipPortRegex = regexp.MustCompile(IP_PORT_PATTERN) + +func extractIPPort(ipPort string) (string, string, bool) { + match := ipPortRegex.MatchString(ipPort) + + if !match { + return "", "", false + } + + result := ipPortRegex.FindStringSubmatch(ipPort) + if len(result) != 3 { + return "", "", false + } + + ip := result[1] + port := result[2] + + return ip, port, true +} + +func getHostNetworkPorts(pod *corev1.Pod) []string { + var ports []string + if !pod.Spec.HostNetwork { + return ports + } + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.HostPort != 0 { + ports = append(ports, strconv.Itoa(int(port.HostPort))) + } + } + } + return ports +} + +type eksHostedInAttributeResolver struct { + clusterName string + attributeMap map[string]string +} + +func newEKSHostedInAttributeResolver() *eksHostedInAttributeResolver { + return &eksHostedInAttributeResolver{ + attributeMap: map[string]string{ + semconv.AttributeK8SNamespaceName: attr.HostedInK8SNamespace, + }, + } +} +func (h *eksHostedInAttributeResolver) Process(attributes, resourceAttributes pcommon.Map) error { + for attrKey, mappingKey := range h.attributeMap { + if val, ok := resourceAttributes.Get(attrKey); ok { + attributes.PutStr(mappingKey, val.AsString()) + } + } + + if h.clusterName != "" { + attributes.PutStr(attr.HostedInClusterName, h.clusterName) + } else { + platform, _ := resourceAttributes.Get(semconv.AttributeCloudProvider) + if platform.AsString() == semconv.AttributeCloudProviderAWS { + // iterate resource attributes to find the cluster name + resourceAttributes.Range(func(key string, value pcommon.Value) bool { + if strings.HasPrefix(key, "ec2.tag.kubernetes.io/cluster/") && value.Type() == pcommon.ValueTypeStr && value.AsString() == "owned" { + h.clusterName = strings.TrimPrefix(key, "ec2.tag.kubernetes.io/cluster/") + attributes.PutStr(attr.HostedInClusterName, h.clusterName) + return false + } + return true + }) + } + } + + return nil +} + +func (h *eksHostedInAttributeResolver) Stop(ctx context.Context) error { + return nil +} diff --git a/plugins/processors/awsappsignals/internal/resolver/eks_test.go 
b/plugins/processors/awsappsignals/internal/resolver/eks_test.go new file mode 100644 index 0000000000..5f92a55f80 --- /dev/null +++ b/plugins/processors/awsappsignals/internal/resolver/eks_test.go @@ -0,0 +1,1131 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resolver + +import ( + "context" + "strings" + "sync" + "testing" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + attr "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/attributes" +) + +// MockDeleter deletes a key immediately, useful for testing. +type MockDeleter struct{} + +func (md *MockDeleter) DeleteWithDelay(m *sync.Map, key interface{}) { + m.Delete(key) +} + +var mockDeleter = &MockDeleter{} + +// TestAttachNamespace function +func TestAttachNamespace(t *testing.T) { + result := attachNamespace("testResource", "testNamespace") + if result != "testResource@testNamespace" { + t.Errorf("attachNamespace was incorrect, got: %s, want: %s.", result, "testResource@testNamespace") + } +} + +// TestGetServiceAndNamespace function +func TestGetServiceAndNamespace(t *testing.T) { + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testService", + Namespace: "testNamespace", + }, + } + result := getServiceAndNamespace(service) + if result != "testService@testNamespace" { + t.Errorf("getServiceAndNamespace was incorrect, got: %s, want: %s.", result, "testService@testNamespace") + } +} + +// TestExtractResourceAndNamespace function +func TestExtractResourceAndNamespace(t *testing.T) { + // Test normal case + name, namespace := extractResourceAndNamespace("testService@testNamespace") + if name != "testService" || namespace != "testNamespace" { + t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "testService", "testNamespace") + } + + // Test invalid case + name, namespace = extractResourceAndNamespace("invalid") + if name != "" || namespace != "" { + t.Errorf("extractResourceAndNamespace was incorrect, got: %s and %s, want: %s and %s.", name, namespace, "", "") + } +} + +func TestExtractWorkloadNameFromRS(t *testing.T) { + testCases := []struct { + name string + replicaSetName string + want string + shouldErr bool + }{ + { + name: "Valid ReplicaSet Name", + replicaSetName: "my-deployment-5859ffc7ff", + want: "my-deployment", + shouldErr: false, + }, + { + name: "Invalid ReplicaSet Name - No Hyphen", + replicaSetName: "mydeployment5859ffc7ff", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - Less Than 10 Suffix Characters", + replicaSetName: "my-deployment-bc2", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - More Than 10 Suffix Characters", + replicaSetName: "my-deployment-5859ffc7ffx", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - Invalid Characters in Suffix", + replicaSetName: "my-deployment-aeiou12345", + want: "", + shouldErr: true, + }, + { + name: "Invalid ReplicaSet Name - Empty String", + replicaSetName: "", + want: "", + shouldErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := extractWorkloadNameFromRS(tc.replicaSetName) + + if (err != nil) != tc.shouldErr { + t.Errorf("extractWorkloadNameFromRS() error = %v, wantErr %v", err, 
tc.shouldErr) + return + } + + if got != tc.want { + t.Errorf("extractWorkloadNameFromRS() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestExtractWorkloadNameFromPodName(t *testing.T) { + testCases := []struct { + name string + podName string + want string + shouldErr bool + }{ + { + name: "Valid Pod Name", + podName: "my-replicaset-bc24f", + want: "my-replicaset", + shouldErr: false, + }, + { + name: "Invalid Pod Name - No Hyphen", + podName: "myreplicasetbc24f", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - Less Than 5 Suffix Characters", + podName: "my-replicaset-bc2", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - More Than 5 Suffix Characters", + podName: "my-replicaset-bc24f5", + want: "", + shouldErr: true, + }, + { + name: "Invalid Pod Name - Empty String", + podName: "", + want: "", + shouldErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := extractWorkloadNameFromPodName(tc.podName) + + if (err != nil) != tc.shouldErr { + t.Errorf("extractWorkloadNameFromPodName() error = %v, wantErr %v", err, tc.shouldErr) + return + } + + if got != tc.want { + t.Errorf("extractWorkloadNameFromPodName() = %v, want %v", got, tc.want) + } + }) + } +} + +// TestGetWorkloadAndNamespace function +func TestGetWorkloadAndNamespace(t *testing.T) { + // Test ReplicaSet case + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + } + result := getWorkloadAndNamespace(pod) + if result != "testDeployment@testNamespace" { + t.Errorf("getDeploymentAndNamespace was incorrect, got: %s, want: %s.", result, "testDeployment@testNamespace") + } + + // Test StatefulSet case + pod.ObjectMeta.OwnerReferences[0].Kind = "StatefulSet" + pod.ObjectMeta.OwnerReferences[0].Name = "testStatefulSet" + result = getWorkloadAndNamespace(pod) + if result != "testStatefulSet@testNamespace" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "testStatefulSet@testNamespace") + } + + // Test Other case + pod.ObjectMeta.OwnerReferences[0].Kind = "Other" + pod.ObjectMeta.OwnerReferences[0].Name = "testOther" + result = getWorkloadAndNamespace(pod) + if result != "" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") + } + + // Test no OwnerReferences case + pod.ObjectMeta.OwnerReferences = nil + result = getWorkloadAndNamespace(pod) + if result != "" { + t.Errorf("getWorkloadAndNamespace was incorrect, got: %s, want: %s.", result, "") + } +} + +func TestServiceToWorkloadMapper_MapServiceToWorkload(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + serviceAndNamespaceToSelectors.Store("service1@namespace1", mapset.NewSet("label1=value1", "label2=value2")) + workloadAndNamespaceToLabels.Store("deployment1@namespace1", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) + + mapper := NewServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.MapServiceToWorkload() + + if _, ok := serviceToWorkload.Load("service1@namespace1"); !ok { + t.Errorf("Expected service1@namespace1 to be mapped to a workload, but it was not") + } +} + +func 
TestServiceToWorkloadMapper_MapServiceToWorkload_NoWorkload(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + // Add a service with no matching workload + serviceAndNamespace := "service@namespace" + serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1")) + serviceToWorkload.Store(serviceAndNamespace, "workload@namespace") + + mapper := NewServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.MapServiceToWorkload() + + // Check that the service was deleted from serviceToWorkload + if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { + t.Errorf("Service was not deleted from serviceToWorkload") + } +} + +func TestServiceToWorkloadMapper_MapServiceToWorkload_MultipleWorkloads(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + serviceAndNamespace := "service@namespace" + serviceAndNamespaceToSelectors.Store(serviceAndNamespace, mapset.NewSet("label1=value1", "label2=value2")) + + // Add two workloads with matching labels to the service + workloadAndNamespaceToLabels.Store("workload1@namespace", mapset.NewSet("label1=value1", "label2=value2", "label3=value3")) + workloadAndNamespaceToLabels.Store("workload2@namespace", mapset.NewSet("label1=value1", "label2=value2", "label4=value4")) + + mapper := NewServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + mapper.MapServiceToWorkload() + + // Check that the service does not map to any workload + if _, ok := serviceToWorkload.Load(serviceAndNamespace); ok { + t.Errorf("Unexpected mapping of service to multiple workloads") + } +} + +func TestMapServiceToWorkload_StopsWhenSignaled(t *testing.T) { + logger, _ := zap.NewDevelopment() + + serviceAndNamespaceToSelectors := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + serviceToWorkload := &sync.Map{} + + stopchan := make(chan struct{}) + + // Signal the stopchan to stop after 100 milliseconds + time.AfterFunc(100*time.Millisecond, func() { + close(stopchan) + }) + + mapper := NewServiceToWorkloadMapper(serviceAndNamespaceToSelectors, workloadAndNamespaceToLabels, serviceToWorkload, logger, mockDeleter) + + start := time.Now() + mapper.Start(stopchan) + duration := time.Since(start) + + // Check that the function stopped in a reasonable time after the stop signal + if duration > 200*time.Millisecond { + t.Errorf("mapServiceToWorkload did not stop in a reasonable time after the stop signal, duration: %v", duration) + } +} + +func TestOnAddOrUpdateService(t *testing.T) { + // Create a fake service + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "mynamespace", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Selector: map[string]string{ + "app": "myapp", + }, + }, + } + + // Create the maps + ipToServiceAndNamespace := &sync.Map{} + serviceAndNamespaceToSelectors := &sync.Map{} + + // Call the function + onAddOrUpdateService(service, ipToServiceAndNamespace, serviceAndNamespaceToSelectors) + + // Check that the maps contain the expected entries + if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); !ok { + t.Errorf("ipToServiceAndNamespace does not contain the service IP") + 
} + if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); !ok { + t.Errorf("serviceAndNamespaceToSelectors does not contain the service") + } +} + +func TestOnDeleteService(t *testing.T) { + // Create a fake service + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "mynamespace", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Selector: map[string]string{ + "app": "myapp", + }, + }, + } + + // Create the maps and add the service to them + ipToServiceAndNamespace := &sync.Map{} + ipToServiceAndNamespace.Store("1.2.3.4", "myservice@mynamespace") + serviceAndNamespaceToSelectors := &sync.Map{} + serviceAndNamespaceToSelectors.Store("myservice@mynamespace", mapset.NewSet("app=myapp")) + + // Call the function + onDeleteService(service, ipToServiceAndNamespace, serviceAndNamespaceToSelectors, mockDeleter) + + // Check that the maps do not contain the service + if _, ok := ipToServiceAndNamespace.Load("1.2.3.4"); ok { + t.Errorf("ipToServiceAndNamespace still contains the service IP") + } + if _, ok := serviceAndNamespaceToSelectors.Load("myservice@mynamespace"); ok { + t.Errorf("serviceAndNamespaceToSelectors still contains the service") + } +} + +func TestOnAddOrUpdatePod(t *testing.T) { + logger, _ := zap.NewProduction() + + t.Run("pod with both PodIP and HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-598b89cd8d", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + } + + onAddOrUpdatePod(pod, nil, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, true, logger, mockDeleter) + + // Test the mappings in ipToPod + if podName, _ := ipToPod.Load("1.2.3.4"); podName != "testPod" { + t.Errorf("ipToPod was incorrect, got: %s, want: %s.", podName, "testPod") + } + + // Test the mapping in podToWorkloadAndNamespace + if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { + t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") + } + + // Test the count in workloadPodCount + if count := workloadPodCount["testDeployment@testNamespace"]; count != 1 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) + } + }) + + t.Run("pod with only HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-7b74958fb8", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: int32(8080), + }, + }, + }, + }, + }, + } + + onAddOrUpdatePod(pod, nil, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, true, logger, mockDeleter) + + // Test the mappings in ipToPod + if podName, _ := 
ipToPod.Load("5.6.7.8:8080"); podName != "testPod" { + t.Errorf("ipToPod was incorrect, got: %s, want: %s.", podName, "testPod") + } + + // Test the mapping in podToWorkloadAndNamespace + if depAndNamespace, _ := podToWorkloadAndNamespace.Load("testPod"); depAndNamespace != "testDeployment@testNamespace" { + t.Errorf("podToWorkloadAndNamespace was incorrect, got: %s, want: %s.", depAndNamespace, "testDeployment@testNamespace") + } + + // Test the count in workloadPodCount + if count := workloadPodCount["testDeployment@testNamespace"]; count != 1 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 1) + } + }) + + t.Run("pod updated with different set of labels", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + }, + }, + }, + }, + } + + // add the pod + onAddOrUpdatePod(pod, nil, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, true, logger, mockDeleter) + + // Test the mappings in ipToPod + if podName, ok := ipToPod.Load("5.6.7.8:8080"); !ok && podName != "testPod" { + t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "5.6.7.8:8080", podName, "testPod") + } + + // Test the mapping in workloadAndNamespaceToLabels + labels, _ := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") + expectedLabels := []string{"label1=value1", "label2=value2"} + for _, label := range expectedLabels { + if !labels.(mapset.Set[string]).Contains(label) { + t.Errorf("deploymentAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) + } + } + + pod2 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + "label3": "value3", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-5d68bc5f49", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + } + + // add the pod + onAddOrUpdatePod(pod2, pod, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, false, logger, mockDeleter) + + // Test the mappings in ipToPod + if _, ok := ipToPod.Load("5.6.7.8:8080"); ok { + t.Errorf("ipToPod[%s] should be deleted", "5.6.7.8:8080") + } + + if podName, ok := ipToPod.Load("1.2.3.4"); !ok && podName != "testPod" { + t.Errorf("ipToPod[%s] was incorrect, got: %s, want: %s.", "1.2.3.4", podName, "testPod") + } + // Test the mapping in workloadAndNamespaceToLabels + labels, _ = workloadAndNamespaceToLabels.Load("testDeployment@testNamespace") + expectedLabels = []string{"label1=value1", "label2=value2", "label3=value3"} + for _, label := range expectedLabels { + if !labels.(mapset.Set[string]).Contains(label) { + t.Errorf("workloadAndNamespaceToLabels was incorrect, got: %v, want: %s.", labels, label) + } + } + }) +} + +func TestOnDeletePod(t *testing.T) { + logger, _ := zap.NewProduction() + + t.Run("pod with 
both PodIP and HostIP", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-xyz", + }, + }, + }, + Status: corev1.PodStatus{ + PodIP: "1.2.3.4", + HostIP: "5.6.7.8", + }, + } + + // Assume the pod has already been added + ipToPod.Store(pod.Status.PodIP, pod.Name) + ipToPod.Store(pod.Status.HostIP, pod.Name) + podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") + workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") + workloadPodCount["testDeployment@testNamespace"] = 1 + + onDeletePod(pod, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, logger, mockDeleter) + + // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted + if _, ok := ipToPod.Load("1.2.3.4"); ok { + t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "1.2.3.4") + } + + if _, ok := podToWorkloadAndNamespace.Load("testPod"); ok { + t.Errorf("podToWorkloadAndNamespace deletion was incorrect, key: %s still exists", "testPod") + } + + // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted + if count := workloadPodCount["testDeployment@testNamespace"]; count != 0 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) + } + + if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { + t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") + } + }) + + t.Run("pod with only HostIP and some network ports", func(t *testing.T) { + ipToPod := &sync.Map{} + podToWorkloadAndNamespace := &sync.Map{} + workloadAndNamespaceToLabels := &sync.Map{} + workloadPodCount := map[string]int{} + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPod", + Namespace: "testNamespace", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "testDeployment-xyz", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "5.6.7.8", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: int32(8080), + }, + }, + }, + }, + }, + } + + // Assume the pod has already been added + ipToPod.Store(pod.Status.HostIP, pod.Name) + ipToPod.Store(pod.Status.HostIP+":8080", pod.Name) + podToWorkloadAndNamespace.Store(pod.Name, "testDeployment@testNamespace") + workloadAndNamespaceToLabels.Store("testDeployment@testNamespace", "testLabels") + workloadPodCount["testDeployment@testNamespace"] = 1 + + onDeletePod(pod, ipToPod, podToWorkloadAndNamespace, workloadAndNamespaceToLabels, workloadPodCount, logger, mockDeleter) + + // Test if the entries in ipToPod and podToWorkloadAndNamespace have been deleted + if _, ok := ipToPod.Load("5.6.7.8:8080"); ok { + t.Errorf("ipToPod deletion was incorrect, key: %s still exists", "5.6.7.8:8080") + } + + if _, ok := podToWorkloadAndNamespace.Load("testPod"); ok { + t.Errorf("podToDeploymentAndNamespace deletion was incorrect, key: %s still exists", "testPod") + } + + // Test if the count in workloadPodCount has been decremented and the entry in workloadAndNamespaceToLabels has been deleted + if count := 
workloadPodCount["testDeployment@testNamespace"]; count != 0 { + t.Errorf("workloadPodCount was incorrect, got: %d, want: %d.", count, 0) + } + + if _, ok := workloadAndNamespaceToLabels.Load("testDeployment@testNamespace"); ok { + t.Errorf("workloadAndNamespaceToLabels deletion was incorrect, key: %s still exists", "testDeployment@testNamespace") + } + }) +} + +func TestEksResolver(t *testing.T) { + logger, _ := zap.NewProduction() + ctx := context.Background() + + t.Run("Test GetWorkloadAndNamespaceByIP", func(t *testing.T) { + resolver := &eksResolver{ + logger: logger, + ipToPod: &sync.Map{}, + podToWorkloadAndNamespace: &sync.Map{}, + ipToServiceAndNamespace: &sync.Map{}, + serviceToWorkload: &sync.Map{}, + } + + ip := "1.2.3.4" + pod := "testPod" + workloadAndNamespace := "testDeployment@testNamespace" + + // Pre-fill the resolver maps + resolver.ipToPod.Store(ip, pod) + resolver.podToWorkloadAndNamespace.Store(pod, workloadAndNamespace) + + // Test existing IP + workload, namespace, err := resolver.GetWorkloadAndNamespaceByIP(ip) + if err != nil || workload != "testDeployment" || namespace != "testNamespace" { + t.Errorf("Expected testDeployment@testNamespace, got %s@%s, error: %v", workload, namespace, err) + } + + // Test non-existing IP + _, _, err = resolver.GetWorkloadAndNamespaceByIP("5.6.7.8") + if err == nil || !strings.Contains(err.Error(), "no EKS workload found for ip: 5.6.7.8") { + t.Errorf("Expected error, got %v", err) + } + + // Test ip in ipToServiceAndNamespace but not in ipToPod + newIP := "2.3.4.5" + serviceAndNamespace := "testService@testNamespace" + resolver.ipToServiceAndNamespace.Store(newIP, serviceAndNamespace) + resolver.serviceToWorkload.Store(serviceAndNamespace, workloadAndNamespace) + workload, namespace, err = resolver.GetWorkloadAndNamespaceByIP(newIP) + if err != nil || workload != "testDeployment" || namespace != "testNamespace" { + t.Errorf("Expected testDeployment@testNamespace, got %s@%s, error: %v", workload, namespace, err) + } + }) + + t.Run("Test Stop", func(t *testing.T) { + resolver := &eksResolver{ + logger: logger, + safeStopCh: &safeChannel{ch: make(chan struct{}), closed: false}, + } + + err := resolver.Stop(ctx) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + + if !resolver.safeStopCh.closed { + t.Errorf("Expected channel to be closed") + } + + // Test closing again + err = resolver.Stop(ctx) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + }) + + t.Run("Test Process", func(t *testing.T) { + // helper function to get string values from the attributes + getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { + if value, ok := attributes.Get(key); ok { + return value.AsString() + } else { + t.Errorf("Failed to get value for key: %s", key) + return "" + } + } + + logger, _ := zap.NewProduction() + resolver := &eksResolver{ + logger: logger, + ipToPod: &sync.Map{}, + podToWorkloadAndNamespace: &sync.Map{}, + ipToServiceAndNamespace: &sync.Map{}, + serviceToWorkload: &sync.Map{}, + } + + // Test case 1: "aws.remote.service" contains IP:Port + attributes := pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.0.2.1:8080") + resourceAttributes := pcommon.NewMap() + resolver.ipToPod.Store("192.0.2.1:8080", "test-pod") + resolver.podToWorkloadAndNamespace.Store("test-pod", "test-deployment@test-namespace") + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "test-deployment", getStrAttr(attributes, attr.AWSRemoteService, 
t)) + assert.Equal(t, "test-namespace", getStrAttr(attributes, attr.K8SRemoteNamespace, t)) + + // Test case 2: "aws.remote.service" contains only IP + attributes = pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.0.2.2") + resourceAttributes = pcommon.NewMap() + resolver.ipToPod.Store("192.0.2.2", "test-pod-2") + resolver.podToWorkloadAndNamespace.Store("test-pod-2", "test-deployment-2@test-namespace-2") + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "test-deployment-2", getStrAttr(attributes, attr.AWSRemoteService, t)) + assert.Equal(t, "test-namespace-2", getStrAttr(attributes, attr.K8SRemoteNamespace, t)) + + // Test case 3: "aws.remote.service" contains non-ip string + attributes = pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "not-an-ip") + resourceAttributes = pcommon.NewMap() + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "not-an-ip", getStrAttr(attributes, attr.AWSRemoteService, t)) + + // Test case 4: Process with valid IP but GetWorkloadAndNamespaceByIP returns error + attributes = pcommon.NewMap() + attributes.PutStr(attr.AWSRemoteService, "192.168.1.2") + resourceAttributes = pcommon.NewMap() + err = resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "UnknownRemoteService", getStrAttr(attributes, attr.AWSRemoteService, t)) + }) +} + +func TestHostedInEksResolver(t *testing.T) { + // helper function to get string values from the attributes + getStrAttr := func(attributes pcommon.Map, key string, t *testing.T) string { + if value, ok := attributes.Get(key); ok { + return value.AsString() + } else { + t.Errorf("Failed to get value for key: %s", key) + return "" + } + } + + resolver := newEKSHostedInAttributeResolver() + + // Test case 1 and 2: resourceAttributes contains "k8s.namespace.name" and EKS cluster name + attributes := pcommon.NewMap() + resourceAttributes := pcommon.NewMap() + resourceAttributes.PutStr("cloud.provider", "aws") + resourceAttributes.PutStr("k8s.namespace.name", "test-namespace-3") + resourceAttributes.PutStr("ec2.tag.kubernetes.io/cluster/test-cluster", "owned") + err := resolver.Process(attributes, resourceAttributes) + assert.NoError(t, err) + assert.Equal(t, "test-namespace-3", getStrAttr(attributes, attr.HostedInK8SNamespace, t)) + assert.Equal(t, "test-cluster", getStrAttr(attributes, attr.HostedInClusterName, t)) +} + +func TestExtractIPPort(t *testing.T) { + // Test valid IP:Port + ip, port, ok := extractIPPort("192.0.2.0:8080") + assert.Equal(t, "192.0.2.0", ip) + assert.Equal(t, "8080", port) + assert.True(t, ok) + + // Test invalid IP:Port + ip, port, ok = extractIPPort("192.0.2:8080") + assert.Equal(t, "", ip) + assert.Equal(t, "", port) + assert.False(t, ok) + + // Test IP only + ip, port, ok = extractIPPort("192.0.2.0") + assert.Equal(t, "", ip) + assert.Equal(t, "", port) + assert.False(t, ok) +} + +func TestGetHostNetworkPorts(t *testing.T) { + // Test Pod with no ports + pod := &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {}, + }, + }, + } + assert.Empty(t, getHostNetworkPorts(pod)) + + // Test Pod with one port + pod = &corev1.Pod{ + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + }, + }, + }, + }, + } + assert.Equal(t, []string{"8080"}, getHostNetworkPorts(pod)) + + // Test Pod with multiple ports + pod = &corev1.Pod{ + Spec: corev1.PodSpec{ + 
HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostPort: 8080}, + {HostPort: 8081}, + }, + }, + }, + }, + } + assert.Equal(t, []string{"8080", "8081"}, getHostNetworkPorts(pod)) +} + +func TestHandlePodUpdate(t *testing.T) { + testCases := []struct { + name string + oldPod *corev1.Pod + newPod *corev1.Pod + initialIPToPod map[string]string + expectedIPToPod map[string]string + }{ + { + name: "Old and New Pod Use Host Network, Different Ports", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + HostIP: "192.168.1.1", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: 8000, + }, + }, + }, + }, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + HostIP: "192.168.1.1", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: 8080, + }, + }, + }, + }, + }, + }, + initialIPToPod: map[string]string{ + "192.168.1.1:8000": "mypod", + }, + expectedIPToPod: map[string]string{ + "192.168.1.1:8080": "mypod", + }, + }, + // ...other test cases... + { + name: "Old Pod Uses Host Network, New Pod Does Not", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + HostIP: "192.168.1.2", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: 8001, + }, + }, + }, + }, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + initialIPToPod: map[string]string{ + "192.168.1.2:8001": "mypod", + }, + expectedIPToPod: map[string]string{ + "10.0.0.1": "mypod", + }, + }, + { + name: "Old Pod Does Not Use Host Network, New Pod Does", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.2", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + HostIP: "192.168.1.3", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + { + HostPort: 8002, + }, + }, + }, + }, + }, + }, + initialIPToPod: map[string]string{ + "10.0.0.2": "mypod", + }, + expectedIPToPod: map[string]string{ + "192.168.1.3:8002": "mypod", + }, + }, + { + name: "Old and New Pod Do Not Use Host Network, Different Pod IPs", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.3", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + newPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.4", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + initialIPToPod: map[string]string{ + "10.0.0.3": "mypod", + }, + expectedIPToPod: map[string]string{ + "10.0.0.4": "mypod", + }, + }, + { + name: "Old Pod Has Empty PodIP, New Pod Does Not Use Host Network, Non-Empty Pod IP", + oldPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + newPod: 
&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.5", + }, + Spec: corev1.PodSpec{ + HostNetwork: false, + }, + }, + initialIPToPod: map[string]string{}, + expectedIPToPod: map[string]string{ + "10.0.0.5": "mypod", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ipToPod := &sync.Map{} + // Initialize ipToPod map + for k, v := range tc.initialIPToPod { + ipToPod.Store(k, v) + } + handlePodUpdate(tc.newPod, tc.oldPod, ipToPod, mockDeleter) + + // Now validate that ipToPod map has been updated correctly + for key, expectedValue := range tc.expectedIPToPod { + val, ok := ipToPod.Load(key) + if !ok || val.(string) != expectedValue { + t.Errorf("Expected record for %v to be %v, got %v", key, expectedValue, val) + } + } + // Validate that old keys have been removed + for key := range tc.initialIPToPod { + if _, ok := tc.expectedIPToPod[key]; !ok { + if _, ok := ipToPod.Load(key); ok { + t.Errorf("Expected record for %v to be removed, but it was not", key) + } + } + } + }) + } +} diff --git a/plugins/processors/awsappsignals/processor.go b/plugins/processors/awsappsignals/processor.go new file mode 100644 index 0000000000..4d5ca28e9a --- /dev/null +++ b/plugins/processors/awsappsignals/processor.go @@ -0,0 +1,269 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsappsignals + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/normalizer" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/internal/resolver" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" +) + +const ( + failedToProcessAttribute = "failed to process attributes" + failedToProcessAttributeWithCustomRule = "failed to process attributes with custom rule, will drop the metric" +) + +// this is used to Process some attributes (like IP addresses) to a generic form to reduce high cardinality +type attributesMutator interface { + Process(attributes, resourceAttributes pcommon.Map, isTrace bool) error +} + +type allowListMutator interface { + ShouldBeDropped(attributes pcommon.Map) (bool, error) +} + +type stopper interface { + Stop(context.Context) error +} + +type awsappsignalsprocessor struct { + logger *zap.Logger + config *Config + replaceActions *rules.ReplaceActions + allowlistMutators []allowListMutator + metricMutators []attributesMutator + traceMutators []attributesMutator + stoppers []stopper +} + +func (ap *awsappsignalsprocessor) Start(_ context.Context, _ component.Host) error { + attributesResolver := resolver.NewAttributesResolver(ap.config.Resolvers, ap.logger) + ap.stoppers = []stopper{attributesResolver} + ap.metricMutators = []attributesMutator{attributesResolver} + + attributesNormalizer := normalizer.NewAttributesNormalizer(ap.logger) + ap.metricMutators = []attributesMutator{attributesResolver, attributesNormalizer} + + ap.replaceActions = rules.NewReplacer(ap.config.Rules) + ap.traceMutators = []attributesMutator{attributesResolver, attributesNormalizer, ap.replaceActions} + + keeper := rules.NewKeeper(ap.config.Rules) + ap.allowlistMutators = []allowListMutator{keeper} + + dropper := rules.NewDropper(ap.config.Rules) + 
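+	// both keep and drop rules participate in data point filtering; they are evaluated
+	// per data point in processMetricAttributes via ShouldBeDropped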
ap.allowlistMutators = []allowListMutator{keeper, dropper}
+
+	return nil
+}
+
+func (ap *awsappsignalsprocessor) Shutdown(ctx context.Context) error {
+	for _, stopper := range ap.stoppers {
+		err := stopper.Stop(ctx)
+		if err != nil {
+			ap.logger.Error("failed to stop", zap.Error(err))
+		}
+	}
+	return nil
+}
+
+func (ap *awsappsignalsprocessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
+	rss := td.ResourceSpans()
+	for i := 0; i < rss.Len(); i++ {
+		rs := rss.At(i)
+		ilss := rs.ScopeSpans()
+		resourceAttributes := rs.Resource().Attributes()
+		for j := 0; j < ilss.Len(); j++ {
+			ils := ilss.At(j)
+			spans := ils.Spans()
+			for k := 0; k < spans.Len(); k++ {
+				span := spans.At(k)
+				for _, mutator := range ap.traceMutators {
+					err := mutator.Process(span.Attributes(), resourceAttributes, true)
+					if err != nil {
+						ap.logger.Debug("failed to Process span", zap.Error(err))
+					}
+				}
+			}
+		}
+	}
+	return td, nil
+}
+
+func (ap *awsappsignalsprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
+	rms := md.ResourceMetrics()
+	for i := 0; i < rms.Len(); i++ {
+		rs := rms.At(i)
+		ilms := rs.ScopeMetrics()
+		resourceAttributes := rs.Resource().Attributes()
+		for j := 0; j < ilms.Len(); j++ {
+			ils := ilms.At(j)
+			metrics := ils.Metrics()
+			for k := 0; k < metrics.Len(); k++ {
+				m := metrics.At(k)
+				ap.processMetricAttributes(ctx, m, resourceAttributes)
+			}
+		}
+	}
+	return md, nil
+}
+
+// Attributes are provided for each log and trace, but not at the metric level.
+// We need to process attributes for every data point within a metric.
+func (ap *awsappsignalsprocessor) processMetricAttributes(ctx context.Context, m pmetric.Metric, resourceAttribes pcommon.Map) {
+
+	// This is a lot of repeated code, but since there is no single parent superclass
+	// between metric data types, we can't use polymorphism.
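+	// For each data point the same three steps run: (1) apply the attribute mutators
+	// (resolver and normalizer), (2) drop the data point if the keep/drop rules say so,
+	// (3) apply the replace rules to the surviving data points.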
+ switch m.Type() { + case pmetric.MetricTypeGauge: + dps := m.Gauge().DataPoints() + for i := 0; i < dps.Len(); i++ { + for _, mutator := range ap.metricMutators { + err := mutator.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + } + dps.RemoveIf(func(d pmetric.NumberDataPoint) bool { + for _, mutator := range ap.allowlistMutators { + shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) + if err != nil { + ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + } + if shouldBeDropped { + return true + } + } + return false + }) + for i := 0; i < dps.Len(); i++ { + err := ap.replaceActions.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + case pmetric.MetricTypeSum: + dps := m.Sum().DataPoints() + for i := 0; i < dps.Len(); i++ { + for _, mutator := range ap.metricMutators { + err := mutator.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + } + dps.RemoveIf(func(d pmetric.NumberDataPoint) bool { + for _, mutator := range ap.allowlistMutators { + shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) + if err != nil { + ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + } + if shouldBeDropped { + return true + } + } + return false + }) + for i := 0; i < dps.Len(); i++ { + err := ap.replaceActions.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + case pmetric.MetricTypeHistogram: + dps := m.Histogram().DataPoints() + for i := 0; i < dps.Len(); i++ { + for _, mutator := range ap.metricMutators { + err := mutator.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + } + dps.RemoveIf(func(d pmetric.HistogramDataPoint) bool { + for _, mutator := range ap.allowlistMutators { + shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) + if err != nil { + ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + } + if shouldBeDropped { + return true + } + } + return false + }) + for i := 0; i < dps.Len(); i++ { + err := ap.replaceActions.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + case pmetric.MetricTypeExponentialHistogram: + dps := m.ExponentialHistogram().DataPoints() + for i := 0; i < dps.Len(); i++ { + for _, mutator := range ap.metricMutators { + err := mutator.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + } + dps.RemoveIf(func(d pmetric.ExponentialHistogramDataPoint) bool { + for _, mutator := range ap.allowlistMutators { + shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) + if err != nil { + ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + } + if shouldBeDropped { + return true + } + } + return false + }) + for i := 0; i < dps.Len(); i++ { + err := ap.replaceActions.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + case pmetric.MetricTypeSummary: + dps := m.Summary().DataPoints() + for i := 0; i < dps.Len(); i++ { + 
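+			// run the attribute mutators (resolver, normalizer) on each summary data point,
+			// mirroring the handling of the other metric types above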
for _, mutator := range ap.metricMutators { + err := mutator.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + } + dps.RemoveIf(func(d pmetric.SummaryDataPoint) bool { + for _, mutator := range ap.allowlistMutators { + shouldBeDropped, err := mutator.ShouldBeDropped(d.Attributes()) + if err != nil { + ap.logger.Debug(failedToProcessAttributeWithCustomRule, zap.Error(err)) + } + if shouldBeDropped { + return true + } + } + return false + }) + for i := 0; i < dps.Len(); i++ { + err := ap.replaceActions.Process(dps.At(i).Attributes(), resourceAttribes, false) + if err != nil { + ap.logger.Debug(failedToProcessAttribute, zap.Error(err)) + } + } + default: + ap.logger.Debug("Ignore unknown metric type", zap.String("type", m.Type().String())) + } +} diff --git a/plugins/processors/awsappsignals/rules/common.go b/plugins/processors/awsappsignals/rules/common.go new file mode 100644 index 0000000000..3c8fd06058 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/common.go @@ -0,0 +1,120 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package rules + +import ( + "errors" + "fmt" + + "github.com/gobwas/glob" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type AllowListAction string + +const ( + AllowListActionKeep AllowListAction = "keep" + AllowListActionDrop AllowListAction = "drop" + AllowListActionReplace AllowListAction = "replace" +) + +type Selector struct { + Dimension string `mapstructure:"dimension"` + Match string `mapstructure:"match"` +} + +type Replacement struct { + TargetDimension string `mapstructure:"target_dimension"` + Value string `mapstructure:"value"` +} + +type Rule struct { + Selectors []Selector `mapstructure:"selectors"` + Replacements []Replacement `mapstructure:"replacements,omitempty"` + Action AllowListAction `mapstructure:"action"` + RuleName string `mapstructure:"rule_name,omitempty"` +} + +type SelectorMatcherItem struct { + Key string + Matcher glob.Glob +} + +type ActionItem struct { + SelectorMatchers []SelectorMatcherItem + Replacements []Replacement `mapstructure:",omitempty"` +} + +var traceKeyMap = map[string]string{ + "Service": "aws.local.service", + "Operation": "aws.local.operation", + "RemoteService": "aws.remote.service", + "RemoteOperation": "aws.remote.operation", +} + +func GetAllowListAction(action string) (AllowListAction, error) { + switch action { + case "drop": + return AllowListActionDrop, nil + case "keep": + return AllowListActionKeep, nil + case "replace": + return AllowListActionReplace, nil + } + return "", errors.New("invalid action in rule") +} + +func getExactKey(metricDimensionKey string, isTrace bool) string { + if !isTrace { + return metricDimensionKey + } + traceDimensionKey, ok := traceKeyMap[metricDimensionKey] + if !ok { + // return original key if there is no matches + return metricDimensionKey + } + return traceDimensionKey +} + +func matchesSelectors(attributes pcommon.Map, selectorMatchers []SelectorMatcherItem, isTrace bool) (bool, error) { + for _, item := range selectorMatchers { + exactKey := getExactKey(item.Key, isTrace) + value, ok := attributes.Get(exactKey) + if !ok { + return false, fmt.Errorf("can not find attribute %q in the datapoint", exactKey) + } + if !item.Matcher.Match(value.AsString()) { + return false, nil + } + } + return true, nil +} + +func generateSelectorMatchers(selectors []Selector) []SelectorMatcherItem { + var selectorMatchers 
[]SelectorMatcherItem + for _, selector := range selectors { + selectorMatcherItem := SelectorMatcherItem{ + selector.Dimension, + glob.MustCompile(selector.Match), + } + selectorMatchers = append(selectorMatchers, selectorMatcherItem) + } + return selectorMatchers +} + +func generateActionDetails(rules []Rule, action AllowListAction) []ActionItem { + var actionItems []ActionItem + for _, rule := range rules { + if rule.Action == action { + var selectorMatchers = generateSelectorMatchers(rule.Selectors) + actionItem := ActionItem{ + selectorMatchers, + rule.Replacements, + } + actionItems = append(actionItems, actionItem) + } + } + + return actionItems +} diff --git a/plugins/processors/awsappsignals/rules/common_test.go b/plugins/processors/awsappsignals/rules/common_test.go new file mode 100644 index 0000000000..e2ab0fe9a1 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/common_test.go @@ -0,0 +1,23 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package rules + +import "go.opentelemetry.io/collector/pdata/pcommon" + +func generateTestAttributes(service string, operation string, remoteService string, remoteOperation string, + isTrace bool) pcommon.Map { + attributes := pcommon.NewMap() + if isTrace { + attributes.PutStr("aws.local.service", service) + attributes.PutStr("aws.local.operation", operation) + attributes.PutStr("aws.remote.service", remoteService) + attributes.PutStr("aws.remote.operation", remoteOperation) + } else { + attributes.PutStr("Service", service) + attributes.PutStr("Operation", operation) + attributes.PutStr("RemoteService", remoteService) + attributes.PutStr("RemoteOperation", remoteOperation) + } + return attributes +} diff --git a/plugins/processors/awsappsignals/rules/dropper.go b/plugins/processors/awsappsignals/rules/dropper.go new file mode 100644 index 0000000000..38d3ed6cc5 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/dropper.go @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package rules + +import "go.opentelemetry.io/collector/pdata/pcommon" + +type DropActions struct { + Actions []ActionItem +} + +func NewDropper(rules []Rule) *DropActions { + return &DropActions{ + Actions: generateActionDetails(rules, AllowListActionDrop), + } +} + +func (d *DropActions) ShouldBeDropped(attributes pcommon.Map) (bool, error) { + // nothing will be dropped if no rule is defined + if d.Actions == nil || len(d.Actions) == 0 { + return false, nil + } + for _, element := range d.Actions { + isMatched, err := matchesSelectors(attributes, element.SelectorMatchers, false) + if isMatched { + // drop the datapoint as one of drop rules is matched + return true, nil + } + if err != nil { + // keep the datapoint as an error occurred in match process + return false, err + } + } + return false, nil +} diff --git a/plugins/processors/awsappsignals/rules/dropper_test.go b/plugins/processors/awsappsignals/rules/dropper_test.go new file mode 100644 index 0000000000..3ff91dea0f --- /dev/null +++ b/plugins/processors/awsappsignals/rules/dropper_test.go @@ -0,0 +1,180 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
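// A hypothetical test (not part of this change) that spells out the selector
// semantics from rules/common.go: all selectors in a rule must match (logical AND),
// values are compared with gobwas/glob patterns rather than regular expressions,
// and for trace data the metric-style dimension names are first mapped to span
// attribute keys via traceKeyMap. The function name and attribute values below are
// illustrative.
func TestSelectorMatchingSketch(t *testing.T) {
	attrs := pcommon.NewMap()
	attrs.PutStr("aws.remote.service", "customer-service")
	attrs.PutStr("aws.remote.operation", "GET /Owners/12345")

	matchers := generateSelectorMatchers([]Selector{
		{Dimension: "RemoteService", Match: "customer-*"},
		{Dimension: "RemoteOperation", Match: "GET /Owners/*"},
	})

	// isTrace=true maps RemoteService/RemoteOperation to the aws.remote.* span keys
	// before looking them up in the attribute map.
	matched, err := matchesSelectors(attrs, matchers, true)
	assert.NoError(t, err)
	assert.True(t, matched)
}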
+// SPDX-License-Identifier: MIT + +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type TestCaseForDropper struct { + name string + input pcommon.Map + output bool +} + +func TestDropperProcessor(t *testing.T) { + config := []Rule{ + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "PUT *", + }, + { + Dimension: "RemoteService", + Match: "customer-test", + }, + }, + Action: "keep", + }, + { + Selectors: []Selector{ + { + Dimension: "RemoteService", + Match: "customer-*", + }, + { + Dimension: "RemoteOperation", + Match: "GET /Owners/*", + }, + }, + Action: "drop", + }, + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "PUT /*/pet/*", + }, + { + Dimension: "RemoteService", + Match: "visit-*-service", + }, + }, + Action: "drop", + }, + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Replacements: []Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "ListPetsByCustomer", + }, + { + TargetDimension: "ResourceTarget", + Value: " ", + }, + }, + Action: "replace", + }, + } + + testDropper := NewDropper(config) + isTrace := false + + testCases := []TestCaseForDropper{ + { + name: "commonTest01ShouldBeKept", + input: generateTestAttributes("customer-test", "GET /user/123", "visit-service", "GET /visit/12345", isTrace), + output: false, + }, + { + name: "commonTest02ShouldBeDropped", + input: generateTestAttributes("common-test", "GET /user/123", "customer-service", "GET /Owners/12345", isTrace), + output: true, + }, + { + name: "commonTest03ShouldBeDropped", + input: generateTestAttributes("common-test", "PUT /test/pet/123", "visit-test-service", "GET /visit/12345", isTrace), + output: true, + }, + } + + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testDropper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} + +func TestDropperProcessorWithNilConfig(t *testing.T) { + testDropper := NewDropper(nil) + isTrace := false + + testCases := []TestCaseForDropper{ + { + name: "nilTest01ShouldBeKept", + input: generateTestAttributes("customer-test", "GET /user/123", "visit-service", "GET /visit/12345", isTrace), + output: false, + }, + { + name: "nilTest02ShouldBeDropped", + input: generateTestAttributes("common-test", "GET /user/123", "customer-service", "GET /Owners/12345", isTrace), + output: false, + }, + { + name: "nilTest03ShouldBeDropped", + input: generateTestAttributes("common-test", "PUT /test/pet/123", "visit-test-service", "GET /visit/12345", isTrace), + output: false, + }, + } + + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testDropper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} + +func TestDropperProcessorWithEmptyConfig(t *testing.T) { + var config []Rule + + testDropper := NewDropper(config) + isTrace := false + + testCases := []TestCaseForDropper{ + { + name: "emptyTest01ShouldBeKept", + input: generateTestAttributes("customer-test", "GET /user/123", "visit-service", "GET /visit/12345", isTrace), + output: false, + }, + { + name: "emptyTest02ShouldBeDropped", + input: generateTestAttributes("common-test", "GET /user/123", "customer-service", "GET /Owners/12345", isTrace), + output: false, + }, + { + name: "emptyTest03ShouldBeDropped", + 
input: generateTestAttributes("common-test", "PUT /test/pet/123", "visit-test-service", "GET /visit/12345", isTrace), + output: false, + }, + } + + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testDropper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} diff --git a/plugins/processors/awsappsignals/rules/keeper.go b/plugins/processors/awsappsignals/rules/keeper.go new file mode 100644 index 0000000000..c88902a20a --- /dev/null +++ b/plugins/processors/awsappsignals/rules/keeper.go @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package rules + +import "go.opentelemetry.io/collector/pdata/pcommon" + +type KeepActions struct { + Actions []ActionItem +} + +func NewKeeper(rules []Rule) *KeepActions { + return &KeepActions{ + Actions: generateActionDetails(rules, AllowListActionKeep), + } +} + +func (k *KeepActions) ShouldBeDropped(attributes pcommon.Map) (bool, error) { + // nothing will be dropped if no keep rule is defined + if k.Actions == nil || len(k.Actions) == 0 { + return false, nil + } + for _, element := range k.Actions { + isMatched, err := matchesSelectors(attributes, element.SelectorMatchers, false) + if isMatched { + // keep the datapoint as one of the keep rules is matched + return false, nil + } + if err != nil { + // drop the datapoint as an error occurred in match process + return true, err + } + } + return true, nil +} diff --git a/plugins/processors/awsappsignals/rules/keeper_test.go b/plugins/processors/awsappsignals/rules/keeper_test.go new file mode 100644 index 0000000000..8a8dbc4941 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/keeper_test.go @@ -0,0 +1,175 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
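// A hypothetical test (not part of this change) making the keeper's contract
// explicit: once any keep rule is configured, only data points matching at least
// one keep rule survive and everything else is dropped. Names and values below are
// illustrative.
func TestKeeperSemanticsSketch(t *testing.T) {
	keeper := NewKeeper([]Rule{{
		Selectors: []Selector{
			{Dimension: "Operation", Match: "PUT *"},
			{Dimension: "RemoteService", Match: "customer-test"},
		},
		Action: "keep",
	}})

	kept := pcommon.NewMap()
	kept.PutStr("Operation", "PUT owners")
	kept.PutStr("RemoteService", "customer-test")
	dropped, err := keeper.ShouldBeDropped(kept)
	assert.NoError(t, err)
	assert.False(t, dropped) // matches the keep rule, so it is retained

	other := pcommon.NewMap()
	other.PutStr("Operation", "GET owners")
	other.PutStr("RemoteService", "vet-test")
	dropped, err = keeper.ShouldBeDropped(other)
	assert.NoError(t, err)
	assert.True(t, dropped) // matches no keep rule, so it is dropped
}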
+// SPDX-License-Identifier: MIT + +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type TestCaseForKeeper struct { + name string + input pcommon.Map + output bool +} + +func TestKeeperProcessor(t *testing.T) { + config := []Rule{ + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "PUT *", + }, + { + Dimension: "RemoteService", + Match: "customer-test", + }, + }, + Action: "keep", + }, + { + Selectors: []Selector{ + { + Dimension: "RemoteService", + Match: "UnknownRemoteService", + }, + { + Dimension: "RemoteOperation", + Match: "GetShardIterator", + }, + }, + Action: "drop", + }, + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Replacements: []Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "ListPetsByCustomer", + }, + { + TargetDimension: "ResourceTarget", + Value: " ", + }, + }, + Action: "replace", + }, + } + + testKeeper := NewKeeper(config) + isTrace := false + + testCases := []TestCaseForKeeper{ + { + name: "commonTest01ShouldBeKept", + input: generateTestAttributes("visit-test", "PUT owners", "customer-test", "PUT owners", isTrace), + output: false, + }, + { + name: "commonTest02ShouldBeDropped", + input: generateTestAttributes("visit-test", "PUT owners", "vet-test", "PUT owners", isTrace), + output: true, + }, + { + name: "commonTest03ShouldBeDropped", + input: generateTestAttributes("vet-test", "GET owners", "customer-test", "PUT owners", isTrace), + output: true, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testKeeper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} + +func TestKeeperProcessorWithNilConfig(t *testing.T) { + testKeeper := NewKeeper(nil) + isTrace := false + + testCases := []TestCaseForKeeper{ + { + name: "nilTest01ShouldBeKept", + input: generateTestAttributes("visit-test", "PUT owners", "customer-test", "PUT owners", isTrace), + output: false, + }, + { + name: "nilTest02ShouldBeKept", + input: generateTestAttributes("visit-test", "PUT owners", "vet-test", "PUT owners", isTrace), + output: false, + }, + { + name: "nilTest03ShouldBeKept", + input: generateTestAttributes("vet-test", "PUT owners", "visit-test", "PUT owners", isTrace), + output: false, + }, + { + name: "nilTest04ShouldBeKept", + input: generateTestAttributes("customer-test", "PUT owners", "visit-test", "PUT owners", isTrace), + output: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testKeeper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} + +func TestKeeperProcessorWithEmptyConfig(t *testing.T) { + + config := []Rule{} + + testKeeper := NewKeeper(config) + isTrace := false + + testCases := []TestCaseForKeeper{ + { + name: "emptyTest01ShouldBeKept", + input: generateTestAttributes("visit-test", "PUT owners", "customer-test", "PUT owners", isTrace), + output: false, + }, + { + name: "emptyTest02ShouldBeKept", + input: generateTestAttributes("visit-test", "PUT owners", "vet-test", "PUT owners", isTrace), + output: false, + }, + { + name: "emptyTest03ShouldBeKept", + input: generateTestAttributes("vet-test", "PUT owners", "visit-test", "PUT owners", isTrace), + output: false, + }, + { + name: "emptyTest04ShouldBeKept", + input: 
generateTestAttributes("customer-test", "PUT owners", "visit-test", "PUT owners", isTrace), + output: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + result, err := testKeeper.ShouldBeDropped(tt.input) + assert.NoError(t, err) + assert.Equal(t, tt.output, result) + }) + } +} diff --git a/plugins/processors/awsappsignals/rules/replacer.go b/plugins/processors/awsappsignals/rules/replacer.go new file mode 100644 index 0000000000..7826581c68 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/replacer.go @@ -0,0 +1,54 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package rules + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type ReplaceActions struct { + Actions []ActionItem +} + +func NewReplacer(rules []Rule) *ReplaceActions { + return &ReplaceActions{ + generateActionDetails(rules, AllowListActionReplace), + } +} + +func (r *ReplaceActions) Process(attributes, _ pcommon.Map, isTrace bool) error { + // do nothing when there is no replace rule defined + if r.Actions == nil || len(r.Actions) == 0 { + return nil + } + // If there are more than one rule are matched, the last one will be executed(Later one has higher priority) + actions := r.Actions + finalRules := make(map[string]string) + for i := len(actions) - 1; i >= 0; i = i - 1 { + element := actions[i] + isMatched, _ := matchesSelectors(attributes, element.SelectorMatchers, isTrace) + if !isMatched { + continue + } + for _, replacement := range element.Replacements { + targetDimensionKey := getExactKey(replacement.TargetDimension, isTrace) + // don't allow customer add new dimension key + _, isExist := attributes.Get(targetDimensionKey) + if !isExist { + continue + } + // every replacement in one specific dimension only will be performed once + _, ok := finalRules[targetDimensionKey] + if ok { + continue + } + finalRules[targetDimensionKey] = replacement.Value + } + } + + for key, value := range finalRules { + attributes.PutStr(key, value) + } + return nil +} diff --git a/plugins/processors/awsappsignals/rules/replacer_test.go b/plugins/processors/awsappsignals/rules/replacer_test.go new file mode 100644 index 0000000000..7bd71c4211 --- /dev/null +++ b/plugins/processors/awsappsignals/rules/replacer_test.go @@ -0,0 +1,281 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
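// A hypothetical test (not part of this change) pinning down two replacer details:
// when several replace rules match the same data point, the rule defined later in
// the configuration wins for each target dimension (the loop walks the actions in
// reverse and keeps only the first value recorded per key), and replacements never
// add new attribute keys; a target dimension missing from the data point is simply
// skipped. Names and values below are illustrative.
func TestReplacerSemanticsSketch(t *testing.T) {
	replacer := NewReplacer([]Rule{
		{
			Selectors:    []Selector{{Dimension: "Operation", Match: "* /api/visits/*"}},
			Replacements: []Replacement{{TargetDimension: "RemoteOperation", Value: "ListPetsByCustomer"}},
			Action:       "replace",
		},
		{
			Selectors:    []Selector{{Dimension: "Operation", Match: "PUT /api/visits/*"}},
			Replacements: []Replacement{{TargetDimension: "RemoteOperation", Value: "PUT visits"}},
			Action:       "replace",
		},
	})

	attrs := pcommon.NewMap()
	attrs.PutStr("Operation", "PUT /api/visits/owners/1")
	attrs.PutStr("RemoteOperation", "PUT owners")

	assert.NoError(t, replacer.Process(attrs, pcommon.NewMap(), false))

	// Both rules match; the later rule's value wins for RemoteOperation.
	remoteOperation, _ := attrs.Get("RemoteOperation")
	assert.Equal(t, "PUT visits", remoteOperation.Str())
}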
+// SPDX-License-Identifier: MIT + +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +type TestCaseForReplacer struct { + name string + input pcommon.Map + output pcommon.Map + isTrace bool +} + +func TestReplacerProcess(t *testing.T) { + + config := []Rule{ + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "PUT *", + }, + { + Dimension: "RemoteService", + Match: "customer-test", + }, + }, + Action: "keep", + }, + { + Selectors: []Selector{ + { + Dimension: "RemoteService", + Match: "UnknownRemoteService", + }, + { + Dimension: "RemoteOperation", + Match: "GetShardIterator", + }, + }, + Action: "drop", + }, + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Replacements: []Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "ListPetsByCustomer", + }, + { + TargetDimension: "Operation", + Value: "PUT/GET", + }, + }, + Action: "replace", + }, + } + + testReplacer := NewReplacer(config) + testMapPlaceHolder := pcommon.NewMap() + + testCases := []TestCaseForReplacer{ + { + name: "test01TraceMatch", + input: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "GET", true), + output: generateTestAttributes("replace-test", "PUT/GET", "customer-test", + "ListPetsByCustomer", true), + isTrace: true, + }, + { + name: "test02TraceNotMatch", + input: generateTestAttributes("replace-test", "PUT /api/customer/owners/12345", "customer-test", + "GET", true), + output: generateTestAttributes("replace-test", "PUT /api/customer/owners/12345", "customer-test", + "GET", true), + isTrace: true, + }, + { + name: "test03MetricMatch", + input: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + output: generateTestAttributes("replace-test", "PUT/GET", "customer-test", + "ListPetsByCustomer", false), + isTrace: false, + }, + { + name: "test04MetricNotMatch", + input: generateTestAttributes("replace-test", "PUT /api/customer/owners/12345", "customer-test", + "GET", false), + output: generateTestAttributes("replace-test", "PUT /api/customer/owners/12345", "customer-test", + "GET", false), + isTrace: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, testReplacer.Process(tt.input, testMapPlaceHolder, tt.isTrace)) + assert.Equal(t, tt.output, tt.input) + }) + } +} + +func TestReplacerProcessWithPriority(t *testing.T) { + + config := []Rule{ + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "* /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "*", + }, + }, + Replacements: []Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "ListPetsByCustomer", + }, + { + TargetDimension: "Operation", + Value: "PUT/GET", + }, + }, + Action: "replace", + }, + { + Selectors: []Selector{ + { + Dimension: "Operation", + Match: "PUT /api/visits/*", + }, + { + Dimension: "RemoteOperation", + Match: "PUT *", + }, + }, + Replacements: []Replacement{ + { + TargetDimension: "RemoteOperation", + Value: "PUT visits", + }, + { + TargetDimension: "Operation", + Value: "PUT", + }, + }, + Action: "replace", + }, + } + + testReplacer := NewReplacer(config) + testMapPlaceHolder := pcommon.NewMap() + + testCases := []TestCaseForReplacer{ + { + name: "test01TraceMatchPreviousOne", + input: generateTestAttributes("replace-test", "PUT 
/api/visits/test/123456", "customer-test", + "GET", true), + output: generateTestAttributes("replace-test", "PUT/GET", "customer-test", + "ListPetsByCustomer", true), + isTrace: true, + }, + { + name: "test02TraceBothMatch", + input: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "PUT /api/owners/123456", true), + output: generateTestAttributes("replace-test", "PUT", "customer-test", + "PUT visits", true), + isTrace: true, + }, + { + name: "test03MetricMatchPreviousOne", + input: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + output: generateTestAttributes("replace-test", "PUT/GET", "customer-test", + "ListPetsByCustomer", false), + isTrace: false, + }, + { + name: "test04MetricBothMatch", + input: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "PUT owners", false), + output: generateTestAttributes("replace-test", "PUT", "customer-test", + "PUT visits", false), + isTrace: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, testReplacer.Process(tt.input, testMapPlaceHolder, tt.isTrace)) + assert.Equal(t, tt.output, tt.input) + }) + } +} + +func TestReplacerProcessWithNilConfig(t *testing.T) { + + testReplacer := NewReplacer(nil) + testMapPlaceHolder := pcommon.NewMap() + + testCases := []TestCaseForReplacer{ + { + name: "test01Trace", + input: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "GET", true), + output: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "GET", true), + isTrace: true, + }, + { + name: "test02Metric", + input: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + output: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + isTrace: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, testReplacer.Process(tt.input, testMapPlaceHolder, tt.isTrace)) + assert.Equal(t, tt.output, tt.input) + }) + } +} + +func TestReplacerProcessWithEmptyConfig(t *testing.T) { + + config := []Rule{} + + testReplacer := NewReplacer(config) + testMapPlaceHolder := pcommon.NewMap() + + testCases := []TestCaseForReplacer{ + { + name: "test01Trace", + input: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "GET", true), + output: generateTestAttributes("replace-test", "PUT /api/visits/test/123456", "customer-test", + "GET", true), + isTrace: true, + }, + { + name: "test02Metric", + input: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + output: generateTestAttributes("replace-test", "PUT /api/visits/owners/12345", "customer-test", + "GET", false), + isTrace: false, + }, + } + for i := range testCases { + tt := testCases[i] + t.Run(tt.name, func(t *testing.T) { + assert.NoError(t, testReplacer.Process(tt.input, testMapPlaceHolder, tt.isTrace)) + assert.Equal(t, tt.output, tt.input) + }) + } +} diff --git a/plugins/processors/awsappsignals/testdata/config.yaml b/plugins/processors/awsappsignals/testdata/config.yaml new file mode 100644 index 0000000000..71c5c84e97 --- /dev/null +++ b/plugins/processors/awsappsignals/testdata/config.yaml @@ -0,0 +1,29 @@ +awsappsignals: + resolvers: [eks] + rules: + - selectors: + - dimension: 
Operation + match: "* /api/visits/*" + - dimension: RemoteOperation + match: "*" + action: keep + rule_name: "keep01" + + - selectors: + - dimension: RemoteService + match: "UnknownRemoteService" + - dimension: RemoteOperation + match: "GetShardIterator" + action: drop + + - selectors: + - dimension: Operation + match: "* /api/visits/*" + - dimension: RemoteOperation + match: "*" + replacements: + - target_dimension: RemoteOperation + value: ListPetsByCustomer + - target_dimension: ResourceTarget + value: ' ' + action: replace \ No newline at end of file diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index d6302773cc..602371fa40 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -7,8 +7,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver" @@ -25,6 +27,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -43,10 +46,12 @@ func Factories() (otelcol.Factories, error) { } if factories.Processors, err = processor.MakeFactoryMap( + awsappsignals.NewFactory(), batchprocessor.NewFactory(), cumulativetodeltaprocessor.NewFactory(), ec2tagger.NewFactory(), metricstransformprocessor.NewFactory(), + resourcedetectionprocessor.NewFactory(), transformprocessor.NewFactory(), ); err != nil { return otelcol.Factories{}, err @@ -64,6 +69,7 @@ func Factories() (otelcol.Factories, error) { if factories.Extensions, err = extension.MakeFactoryMap( agenthealth.NewFactory(), + awsproxy.NewFactory(), ); err != nil { return otelcol.Factories{}, err } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 2d403603aa..17369775b4 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -11,9 +11,9 @@ import ( const ( receiversCount = 5 - processorCount = 5 + processorCount = 7 exportersCount = 5 - extensionsCount = 1 + extensionsCount = 2 ) func TestComponents(t *testing.T) { @@ -29,6 +29,7 @@ func TestComponents(t *testing.T) { processors := factories.Processors assert.Len(t, processors, processorCount) + assert.NotNil(t, processors["awsappsignals"]) assert.NotNil(t, processors["batch"]) assert.NotNil(t, processors["cumulativetodelta"]) assert.NotNil(t, processors["ec2tagger"]) @@ -46,4 +47,5 @@ func TestComponents(t *testing.T) { extensions := factories.Extensions assert.Len(t, extensions, extensionsCount) 
assert.NotNil(t, extensions["agenthealth"]) + assert.NotNil(t, extensions["awsproxy"]) } diff --git a/translator/config/schema.json b/translator/config/schema.json index 656ec61d1a..d2854942df 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -511,6 +511,82 @@ "metrics_collected": { "type": "object", "properties": { + "app_signals": { + "type": "object", + "properties": { + "rules": { + "description": "Custom rules defined by customer", + "type": "array", + "items": { + "type": "object", + "properties": { + "selectors": { + "type": "array", + "items": { + "type": "object", + "properties": { + "dimension": { + "description": "dimension used for matching", + "type": "string", + "minLength": 1 + }, + "match": { + "description": "regex used for match", + "type": "string", + "minLength": 1 + } + }, + "required": [ + "dimension", + "match" + ] + } + }, + "replacements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "target_dimension": { + "description": "dimension to be replaced", + "type": "string", + "minLength": 1 + }, + "value": { + "description": "replacement value", + "type": "string" + } + }, + "required": [ + "target_dimension", + "value" + ] + } + }, + "action": { + "description": "action to be done, either keep, drop or replace", + "type": "string", + "enum": [ + "drop", + "keep", + "replace" + ] + }, + "rule_name": { + "description": "name of rule", + "type": "string", + "minLength": 1 + } + }, + "required": [ + "selectors", + "action" + ] + } + } + }, + "additionalProperties": true + }, "ecs": { "type": "object", "properties": { @@ -759,7 +835,7 @@ "retentionInDaysDefinition": { "type": "integer", "enum": [ - -1, + -1, 1, 3, 5, @@ -812,6 +888,11 @@ "traces_collected": { "type": "object", "properties": { + "app_signals": { + "type": "object", + "properties": {}, + "additionalProperties": true + }, "xray": { "$ref": "#/definitions/tracesDefinition/definitions/xrayDefinition" }, diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.conf b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.conf new file mode 100644 index 0000000000..007bb60efb --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "60s" + logfile = "" + logtarget = "lumberjack" + metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.json b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.json new file mode 100644 index 0000000000..9585df211b --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.json @@ -0,0 +1,24 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "metrics_collected": { + "app_signals": { + }, + "kubernetes": { + "cluster_name": "TestCluster", + "metrics_collection_interval": 30, + "disable_metric_extraction": true, + "enhanced_container_insights": false + } + }, + "force_flush_interval": 5, + "endpoint_override":"https://fake_endpoint" + }, + "traces": { + 
"traces_collected": { + "app_signals": {} + } + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.yaml new file mode 100644 index 0000000000..87d088acbd --- /dev/null +++ b/translator/tocwconfig/sampleConfig/appsignals_and_kubernetes_config.yaml @@ -0,0 +1,664 @@ +connectors: {} +exporters: + awsemf/app_signals: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: "" + enhanced_container_insights: false + imds_retries: 0 + local_mode: false + log_group_name: /aws/appsignals/eks + log_retention: 0 + log_stream_name: "" + max_retries: 2 + metric_declarations: + - dimensions: + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - Operation + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - Service + label_matchers: + - label_names: + - aws.span.kind + regex: ^(SERVER|LOCAL_ROOT)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - Operation + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - Operation + - RemoteOperation + - RemoteService + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - Operation + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - Operation + - RemoteOperation + - RemoteService + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - RemoteService + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - RemoteService + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - RemoteOperation + - RemoteService + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - RemoteOperation + - RemoteService + - Service + - - RemoteService + label_matchers: + - label_names: + - aws.span.kind + regex: ^(CLIENT|PRODUCER|CONSUMER)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + metric_descriptors: [] + middleware: agenthealth/logs + namespace: AppSignals + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: [] + profile: "" + proxy_address: "" + region: "" + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: false + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "1" + awsemf/containerinsights: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: true + eks_fargate_container_insights_enabled: false + endpoint: "https://fake_endpoint" + enhanced_container_insights: false + imds_retries: 1 + local_mode: false + log_group_name: /aws/containerinsights/{ClusterName}/performance + log_retention: 0 + log_stream_name: '{NodeName}' + max_retries: 2 + metric_declarations: + - dimensions: + - - 
ClusterName + - Namespace + - PodName + - - ClusterName + - - ClusterName + - Namespace + - Service + - - ClusterName + - Namespace + label_matchers: [] + metric_name_selectors: + - pod_cpu_utilization + - pod_memory_utilization + - pod_network_rx_bytes + - pod_network_tx_bytes + - pod_cpu_utilization_over_pod_limit + - pod_memory_utilization_over_pod_limit + - dimensions: + - - ClusterName + - Namespace + - PodName + label_matchers: [] + metric_name_selectors: + - pod_number_of_container_restarts + - dimensions: + - - ClusterName + - Namespace + - PodName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - pod_cpu_reserved_capacity + - pod_memory_reserved_capacity + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_utilization + - node_memory_utilization + - node_network_total_bytes + - node_cpu_reserved_capacity + - node_memory_reserved_capacity + - node_number_of_running_pods + - node_number_of_running_containers + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_cpu_usage_total + - node_cpu_limit + - node_memory_working_set + - node_memory_limit + - dimensions: + - - ClusterName + - InstanceId + - NodeName + - - ClusterName + label_matchers: [] + metric_name_selectors: + - node_filesystem_utilization + - dimensions: + - - ClusterName + - Namespace + - Service + - - ClusterName + label_matchers: [] + metric_name_selectors: + - service_number_of_running_pods + - dimensions: + - - ClusterName + - Namespace + - - ClusterName + label_matchers: [] + metric_name_selectors: + - namespace_number_of_running_pods + - dimensions: + - - ClusterName + label_matchers: [] + metric_name_selectors: + - cluster_node_count + - cluster_failed_node_count + metric_descriptors: [] + middleware: agenthealth/logs + namespace: ContainerInsights + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: + - Sources + - kubernetes + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: true + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "0" + awsxray/app_signals: + aws_log_groups: [] + certificate_file_path: "" + endpoint: "" + imds_retries: 1 + index_all_attributes: false + indexed_attributes: + - aws.local.service + - aws.local.operation + - aws.remote.service + - aws.remote.operation + - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - aws.remote.target + - HostedIn.Environment + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + telemetry: + enabled: true + include_metadata: true +extensions: + awsproxy/app_signals: + aws_endpoint: "" + endpoint: 0.0.0.0:2000 + local_mode: false + proxy_address: "" + region: "" + role_arn: "" + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments +processors: + awsappsignals: + resolvers: + - eks + rules: [] + batch/containerinsights: + metadata_cardinality_limit: 1000 + metadata_keys: [] + send_batch_max_size: 0 + send_batch_size: 8192 + timeout: 5s + resourcedetection: + aks: + 
resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + attributes: [] + auth: null + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + meta: {} + namespace: "" + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token: '[REDACTED]' + token_file: "" + detectors: + - eks + - env + - ec2 + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + headers: {} + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + idle_conn_timeout: 1m30s + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + 
enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_conns_per_host: null + max_idle_conns: 100 + max_idle_conns_per_host: null + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + ca_pem: '[REDACTED]' + cert_file: "" + cert_pem: '[REDACTED]' + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: '[REDACTED]' + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + token: "" + override: true + read_buffer_size: 0 + system: + hostname_sources: [] + resource_attributes: + host.arch: + enabled: false + host.id: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + write_buffer_size: 0 +receivers: + awscontainerinsightreceiver: + add_container_name_metric_label: false + add_full_pod_name_metric_label: false + add_service_as_attribute: true + certificate_file_path: "" + cluster_name: TestCluster + collection_interval: 30s + container_orchestrator: eks + enable_control_plane_metrics: false + endpoint: "" + imds_retries: 1 + leader_lock_name: cwagent-clusterleader + leader_lock_using_config_map_only: true + local_mode: false + max_retries: 0 + no_verify_ssl: false + num_workers: 0 + prefer_full_pod_name: false + profile: "" + proxy_address: "" + region: us-east-1 + request_timeout_seconds: 0 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + otlp/app_signals: + protocols: + grpc: + auth: null + endpoint: 0.0.0.0:4315 + include_metadata: false + keepalive: null + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: null + transport: tcp + write_buffer_size: 0 + http: + auth: null + cors: null + endpoint: 0.0.0.0:4316 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + response_headers: {} + tls: null + traces_url_path: /v1/traces +service: + extensions: + - awsproxy/app_signals + - agenthealth/logs + - agenthealth/traces + pipelines: + metrics/app_signals: + exporters: + - awsemf/app_signals + processors: + - resourcedetection + - awsappsignals + receivers: + - otlp/app_signals + metrics/containerinsights: + exporters: + - awsemf/containerinsights + processors: + - batch/containerinsights + receivers: + - awscontainerinsightreceiver + traces/app_signals: + exporters: + - awsxray/app_signals + processors: + - resourcedetection + - awsappsignals + receivers: + - otlp/app_signals + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.conf b/translator/tocwconfig/sampleConfig/base_appsignals_config.conf new file mode 100644 index 0000000000..007bb60efb --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.conf @@ -0,0 +1,27 @@ +[agent] + collection_jitter = "0s" + debug = false + flush_interval = "1s" + flush_jitter = "0s" + hostname = "host_name_from_env" + interval = "60s" + logfile = "" + logtarget = "lumberjack" + 
metric_batch_size = 1000 + metric_buffer_limit = 10000 + omit_hostname = false + precision = "" + quiet = false + round_interval = false + +[inputs] + +[outputs] + + [[outputs.cloudwatchlogs]] + endpoint_override = "https://fake_endpoint" + force_flush_interval = "5s" + log_stream_name = "host_name_from_env" + region = "us-east-1" + +[processors] diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.json b/translator/tocwconfig/sampleConfig/base_appsignals_config.json new file mode 100644 index 0000000000..255feae87a --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.json @@ -0,0 +1,17 @@ +{ + "agent": { + "region": "us-east-1" + }, + "logs": { + "metrics_collected": { + "app_signals": {} + }, + "endpoint_override":"https://fake_endpoint" + }, + "traces": { + "traces_collected": { + "app_signals": {} + }, + "endpoint_override":"https://fake_endpoint" + } +} \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml new file mode 100644 index 0000000000..0d0aead667 --- /dev/null +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -0,0 +1,466 @@ +connectors: {} +exporters: + awsemf/app_signals: + certificate_file_path: "" + detailed_metrics: false + dimension_rollup_option: NoDimensionRollup + disable_metric_extraction: false + eks_fargate_container_insights_enabled: false + endpoint: "" + enhanced_container_insights: false + imds_retries: 0 + local_mode: false + log_group_name: /aws/appsignals/generic + log_retention: 0 + log_stream_name: "" + max_retries: 2 + metric_declarations: + - dimensions: + - - HostedIn.Environment + - Operation + - Service + - - HostedIn.Environment + - Service + label_matchers: + - label_names: + - aws.span.kind + regex: ^(SERVER|LOCAL_ROOT)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - - HostedIn.Environment + - Operation + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.Environment + - Operation + - RemoteOperation + - RemoteService + - Service + - - HostedIn.Environment + - RemoteService + - Service + - - HostedIn.Environment + - RemoteOperation + - RemoteService + - RemoteTarget + - Service + - - HostedIn.Environment + - RemoteOperation + - RemoteService + - Service + - - RemoteService + label_matchers: + - label_names: + - aws.span.kind + regex: ^(CLIENT|PRODUCER|CONSUMER)$ + separator: ; + metric_name_selectors: + - Latency + - Fault + - Error + metric_descriptors: [] + middleware: agenthealth/logs + namespace: AppSignals + no_verify_ssl: false + num_workers: 8 + output_destination: cloudwatch + parse_json_encoded_attr_values: [] + profile: "" + proxy_address: "" + region: "" + request_timeout_seconds: 30 + resource_arn: "" + resource_to_telemetry_conversion: + enabled: false + retain_initial_value_of_delta_metric: false + role_arn: "" + shared_credentials_file: [] + version: "1" + awsxray/app_signals: + aws_log_groups: [] + certificate_file_path: "" + endpoint: https://fake_endpoint + imds_retries: 1 + index_all_attributes: false + indexed_attributes: + - aws.local.service + - aws.local.operation + - aws.remote.service + - aws.remote.operation + - HostedIn.EKS.Cluster + - HostedIn.K8s.Namespace + - K8s.RemoteNamespace + - aws.remote.target + - HostedIn.Environment + local_mode: false + max_retries: 2 + middleware: agenthealth/traces + no_verify_ssl: false + num_workers: 8 + profile: "" + proxy_address: "" + 
region: us-east-1 + request_timeout_seconds: 30 + resource_arn: "" + role_arn: "" + shared_credentials_file: [] + telemetry: + enabled: true + include_metadata: true +extensions: + awsproxy/app_signals: + aws_endpoint: "" + endpoint: 0.0.0.0:2000 + local_mode: false + proxy_address: "" + region: "" + role_arn: "" + agenthealth/logs: + is_usage_data_enabled: true + stats: + operations: + - PutLogEvents + agenthealth/traces: + is_usage_data_enabled: true + stats: + operations: + - PutTraceSegments +processors: + awsappsignals: + resolvers: + - generic + rules: [] + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + attributes: [] + auth: null + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + compression: "" + consul: + address: "" + datacenter: "" + meta: {} + namespace: "" + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token: '[REDACTED]' + token_file: "" + detectors: + - eks + - env + - ec2 + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + 
k8s.cluster.name: + enabled: true + headers: {} + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + idle_conn_timeout: 1m30s + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_conns_per_host: null + max_idle_conns: 100 + max_idle_conns_per_host: null + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + ca_pem: '[REDACTED]' + cert_file: "" + cert_pem: '[REDACTED]' + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: '[REDACTED]' + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + token: "" + override: true + read_buffer_size: 0 + system: + hostname_sources: [] + resource_attributes: + host.arch: + enabled: false + host.id: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + write_buffer_size: 0 +receivers: + otlp/app_signals: + protocols: + grpc: + auth: null + endpoint: 0.0.0.0:4315 + include_metadata: false + keepalive: null + max_concurrent_streams: 0 + max_recv_msg_size_mib: 0 + read_buffer_size: 524288 + tls: null + transport: tcp + write_buffer_size: 0 + http: + auth: null + cors: null + endpoint: 0.0.0.0:4316 + include_metadata: false + logs_url_path: /v1/logs + max_request_body_size: 0 + metrics_url_path: /v1/metrics + response_headers: {} + tls: null + traces_url_path: /v1/traces +service: + extensions: + - awsproxy/app_signals + - agenthealth/traces + - agenthealth/logs + pipelines: + metrics/app_signals: + exporters: + - awsemf/app_signals + processors: + - resourcedetection + - awsappsignals + receivers: + - otlp/app_signals + traces/app_signals: + exporters: + - awsxray/app_signals + processors: + - resourcedetection + - awsappsignals + receivers: + - otlp/app_signals + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: [] + initial_fields: {} + level: info + output_paths: [] + sampling: + initial: 2 + thereafter: 500 + metrics: + address: "" + level: None + readers: [] + resource: {} + traces: + processors: [] + propagators: [] \ No newline at end of file diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index aeeab33ce3..106d45cd88 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -36,6 +36,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/tocwconfig/totomlconfig/tomlConfigTemplate" "github.com/aws/amazon-cloudwatch-agent/translator/tocwconfig/toyamlconfig" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/util" ) @@ -67,6 +68,27 @@ func TestBaseContainerInsightsConfig(t *testing.T) { 
checkTranslation(t, "base_container_insights_config", "darwin", nil, "") } +func TestGenericAppSignalsConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + expectedEnvVars := map[string]string{} + checkTranslation(t, "base_appsignals_config", "linux", expectedEnvVars, "") + checkTranslation(t, "base_appsignals_config", "windows", expectedEnvVars, "") +} + +func TestAppSignalsAndKubernetesConfig(t *testing.T) { + resetContext(t) + context.CurrentContext().SetRunInContainer(true) + t.Setenv(config.HOST_NAME, "host_name_from_env") + t.Setenv(config.HOST_IP, "127.0.0.1") + t.Setenv(common.KubernetesEnvVar, "use_appsignals_eks_config") + expectedEnvVars := map[string]string{} + checkTranslation(t, "appsignals_and_kubernetes_config", "linux", expectedEnvVars, "") + checkTranslation(t, "appsignals_and_kubernetes_config", "windows", expectedEnvVars, "") +} + func TestEmfAndKubernetesConfig(t *testing.T) { resetContext(t) readCommonConfig(t, "./sampleConfig/commonConfig/withCredentials.toml") diff --git a/translator/translate/otel/common/appsignals.go b/translator/translate/otel/common/appsignals.go new file mode 100644 index 0000000000..c6887bcf39 --- /dev/null +++ b/translator/translate/otel/common/appsignals.go @@ -0,0 +1,13 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package common + +import "os" + +const KubernetesEnvVar = "K8S_NAMESPACE" + +func IsAppSignalsKubernetes() bool { + _, isSet := os.LookupEnv(KubernetesEnvVar) + return isSet +} diff --git a/translator/translate/otel/common/appsignals_test.go b/translator/translate/otel/common/appsignals_test.go new file mode 100644 index 0000000000..5a4b8a0ba3 --- /dev/null +++ b/translator/translate/otel/common/appsignals_test.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsAppSignalsKubernetes(t *testing.T) { + assert.False(t, IsAppSignalsKubernetes()) + t.Setenv(KubernetesEnvVar, "TEST") + assert.True(t, IsAppSignalsKubernetes()) +} diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index be3f9d2190..1e1bef0e73 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "gopkg.in/yaml.v3" ) const ( @@ -61,6 +62,18 @@ const ( PipelineNameHost = "host" PipelineNameHostDeltaMetrics = "hostDeltaMetrics" PipelineNameEmfLogs = "emf_logs" + AppSignals = "app_signals" + AppSignalsRules = "rules" +) + +var ( + AppSignalsTraces = ConfigKey(TracesKey, TracesCollectedKey, AppSignals) + AppSignalsMetrics = ConfigKey(LogsKey, MetricsCollectedKey, AppSignals) + + AppSignalsConfigKeys = map[component.DataType]string{ + component.DataTypeTraces: AppSignalsTraces, + component.DataTypeMetrics: AppSignalsMetrics, + } ) // Translator is used to translate the JSON config into an @@ -306,3 +319,16 @@ func GetOrDefaultDuration(conf *confmap.Conf, keychain []string, defaultDuration } return defaultDuration } + +func GetYamlFileToYamlConfig(cfg interface{}, yamlFile string) (interface{}, error) { + var cfgMap map[string]interface{} + if err := yaml.Unmarshal([]byte(yamlFile), &cfgMap); err != nil { + return nil, fmt.Errorf("unable to read default config: %w", err) + } + + conf := confmap.NewFromStringMap(cfgMap) + if err := conf.Unmarshal(&cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal config: %w", err) + } + return cfg, nil +} diff --git a/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml b/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml new file mode 100644 index 0000000000..594116bdfb --- /dev/null +++ b/translator/translate/otel/exporter/awsemf/appsignals_config_eks.yaml @@ -0,0 +1,36 @@ +log_group_name: "/aws/appsignals/eks" +namespace: "AppSignals" +middleware: agenthealth/logs +dimension_rollup_option: "NoDimensionRollup" +metric_declarations: + - dimensions: + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service] + label_matchers: + - label_names: + - aws.span.kind + regex: '^(SERVER|LOCAL_ROOT)$' + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, K8s.RemoteNamespace] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation, RemoteTarget] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, Operation, RemoteService, RemoteOperation] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, K8s.RemoteNamespace] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace, RemoteTarget] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation, K8s.RemoteNamespace] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, 
RemoteService, RemoteOperation, RemoteTarget] + - [HostedIn.EKS.Cluster, HostedIn.K8s.Namespace, Service, RemoteService, RemoteOperation] + - [RemoteService] + label_matchers: + - label_names: + - aws.span.kind + regex: '^(CLIENT|PRODUCER|CONSUMER)$' + metric_name_selectors: + - Latency + - Fault + - Error \ No newline at end of file diff --git a/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml b/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml new file mode 100644 index 0000000000..d447430044 --- /dev/null +++ b/translator/translate/otel/exporter/awsemf/appsignals_config_generic.yaml @@ -0,0 +1,31 @@ +log_group_name: "/aws/appsignals/generic" +namespace: "AppSignals" +middleware: agenthealth/logs +dimension_rollup_option: "NoDimensionRollup" +metric_declarations: + - dimensions: + - [HostedIn.Environment, Service, Operation] + - [HostedIn.Environment, Service] + label_matchers: + - label_names: + - aws.span.kind + regex: '^(SERVER|LOCAL_ROOT)$' + metric_name_selectors: + - Latency + - Fault + - Error + - dimensions: + - [HostedIn.Environment, Service, Operation, RemoteService, RemoteOperation, RemoteTarget] + - [HostedIn.Environment, Service, Operation, RemoteService, RemoteOperation] + - [HostedIn.Environment, Service, RemoteService] + - [HostedIn.Environment, Service, RemoteService, RemoteOperation, RemoteTarget] + - [HostedIn.Environment, Service, RemoteService, RemoteOperation] + - [RemoteService] + label_matchers: + - label_names: + - aws.span.kind + regex: '^(CLIENT|PRODUCER|CONSUMER)$' + metric_name_selectors: + - Latency + - Fault + - Error \ No newline at end of file diff --git a/translator/translate/otel/exporter/awsemf/translator.go b/translator/translate/otel/exporter/awsemf/translator.go index 20c12cb2ae..5fd7871b64 100644 --- a/translator/translate/otel/exporter/awsemf/translator.go +++ b/translator/translate/otel/exporter/awsemf/translator.go @@ -31,6 +31,12 @@ var defaultKubernetesConfig string //go:embed awsemf_default_prometheus.yaml var defaultPrometheusConfig string +//go:embed appsignals_config_eks.yaml +var appSignalsConfigEks string + +//go:embed appsignals_config_generic.yaml +var appSignalsConfigGeneric string + var ( ecsBasePathKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.ECSKey) kubernetesBasePathKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.KubernetesKey) @@ -63,6 +69,12 @@ func (t *translator) Translate(c *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*awsemfexporter.Config) cfg.MiddlewareID = &agenthealth.LogsID + if common.IsAppSignalsKubernetes() && t.name == common.AppSignals { + return common.GetYamlFileToYamlConfig(cfg, appSignalsConfigEks) + } else if t.name == common.AppSignals { + return common.GetYamlFileToYamlConfig(cfg, appSignalsConfigGeneric) + } + var defaultConfig string if isEcs(c) { defaultConfig = defaultEcsConfig diff --git a/translator/translate/otel/exporter/awsemf/translator_test.go b/translator/translate/otel/exporter/awsemf/translator_test.go index 2a4e0889f8..0b05f10ddb 100644 --- a/translator/translate/otel/exporter/awsemf/translator_test.go +++ b/translator/translate/otel/exporter/awsemf/translator_test.go @@ -4,16 +4,20 @@ package awsemf import ( + "path/filepath" "testing" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "github.com/aws/amazon-cloudwatch-agent/internal/util/testutil" legacytranslator "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) var nilSlice []string @@ -687,3 +691,53 @@ func TestTranslator(t *testing.T) { }) } } + +func TestTranslateAppSignals(t *testing.T) { + tt := NewTranslatorWithName(common.AppSignals) + testCases := map[string]struct { + input map[string]interface{} + want *confmap.Conf + wantErr error + isKubernetes bool + }{ + "WithAppSignalsEnabledEKS": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: testutil.GetConf(t, filepath.Join("appsignals_config_eks.yaml")), + isKubernetes: true, + }, + "WithAppSignalsEnabledGeneric": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: testutil.GetConf(t, filepath.Join("appsignals_config_generic.yaml")), + isKubernetes: false, + }, + } + factory := awsemfexporter.NewFactory() + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.isKubernetes { + t.Setenv(common.KubernetesEnvVar, "TEST") + } + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*awsemfexporter.Config) + require.True(t, ok) + wantCfg := factory.CreateDefaultConfig() + require.NoError(t, component.UnmarshalConfig(testCase.want, wantCfg)) + assert.Equal(t, wantCfg, gotCfg) + } + }) + } +} diff --git a/translator/translate/otel/exporter/awsxray/translator.go b/translator/translate/otel/exporter/awsxray/translator.go index a67404ecbb..d1614a0444 100644 --- a/translator/translate/otel/exporter/awsxray/translator.go +++ b/translator/translate/otel/exporter/awsxray/translator.go @@ -4,6 +4,7 @@ package awsxray import ( + _ "embed" "fmt" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter" @@ -49,6 +50,15 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: common.TracesKey} } cfg := t.factory.CreateDefaultConfig().(*awsxrayexporter.Config) + + if isAppSignals(conf) { + cfg.IndexedAttributes = []string{ + "aws.local.service", "aws.local.operation", "aws.remote.service", "aws.remote.operation", + "HostedIn.EKS.Cluster", "HostedIn.K8s.Namespace", "K8s.RemoteNamespace", "aws.remote.target", + "HostedIn.Environment", + } + } + c := confmap.NewFromStringMap(map[string]interface{}{ "telemetry": map[string]interface{}{ "enabled": true, @@ -106,3 +116,7 @@ func getRegion(conf *confmap.Conf) string { } return region } + +func isAppSignals(conf *confmap.Conf) bool { + return conf.IsSet(common.AppSignalsTraces) +} diff --git a/translator/translate/otel/exporter/awsxray/translator_test.go b/translator/translate/otel/exporter/awsxray/translator_test.go index 743362cc8f..a3ae0847d6 100644 --- a/translator/translate/otel/exporter/awsxray/translator_test.go +++ b/translator/translate/otel/exporter/awsxray/translator_test.go @@ -52,6 +52,35 @@ func TestTranslator(t *testing.T) { input: 
testutil.GetJson(t, filepath.Join("testdata", "config.json")), want: testutil.GetConf(t, filepath.Join("testdata", "config.yaml")), }, + "WithAppSignalsEnabled": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "indexed_attributes": []string{ + "aws.local.service", + "aws.local.operation", + "aws.remote.service", + "aws.remote.operation", + "HostedIn.EKS.Cluster", + "HostedIn.K8s.Namespace", + "K8s.RemoteNamespace", + "aws.remote.target", + "HostedIn.Environment", + }, + "region": "us-east-1", + "role_arn": "global_arn", + "imds_retries": 1, + "telemetry": map[string]interface{}{ + "enabled": true, + "include_metadata": true, + }, + "middleware": "agenthealth/traces", + }), + }, } factory := awsxrayexporter.NewFactory() for name, testCase := range testCases { diff --git a/translator/translate/otel/extension/awsproxy/translator.go b/translator/translate/otel/extension/awsproxy/translator.go new file mode 100644 index 0000000000..4aeab63887 --- /dev/null +++ b/translator/translate/otel/extension/awsproxy/translator.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsproxy + +import ( + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + factory extension.Factory +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator() common.Translator[component.Config] { + return NewTranslatorWithName("") +} + +func NewTranslatorWithName(name string) common.Translator[component.Config] { + return &translator{name, awsproxy.NewFactory()} +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*awsproxy.Config) + return cfg, nil +} diff --git a/translator/translate/otel/extension/awsproxy/translator_test.go b/translator/translate/otel/extension/awsproxy/translator_test.go new file mode 100644 index 0000000000..5934a92e91 --- /dev/null +++ b/translator/translate/otel/extension/awsproxy/translator_test.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsproxy + +import ( + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/confmap" +) + +func TestTranslate(t *testing.T) { + tt := NewTranslator() + conf := confmap.NewFromStringMap(map[string]interface{}{}) + got, err := tt.Translate(conf) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*awsproxy.Config) + require.True(t, ok) + wantCfg := awsproxy.NewFactory().CreateDefaultConfig() + assert.Equal(t, wantCfg, gotCfg) + } +} diff --git a/translator/translate/otel/pipeline/appsignals/translator.go b/translator/translate/otel/pipeline/appsignals/translator.go new file mode 100644 index 0000000000..71aafb8d92 --- /dev/null +++ b/translator/translate/otel/pipeline/appsignals/translator.go @@ -0,0 +1,63 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package appsignals + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsxray" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/awsproxy" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsappsignals" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/resourcedetection" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/otlp" +) + +type translator struct { + dataType component.DataType +} + +var _ common.Translator[*common.ComponentTranslators] = (*translator)(nil) + +func NewTranslator(dataType component.DataType) common.Translator[*common.ComponentTranslators] { + return &translator{ + dataType, + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.dataType, common.AppSignals) +} + +func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, error) { + configKey, ok := common.AppSignalsConfigKeys[t.dataType] + if !ok { + return nil, fmt.Errorf("no config key defined for data type: %s", t.dataType) + } + if conf == nil || !conf.IsSet(configKey) { + return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey} + } + + translators := &common.ComponentTranslators{ + Receivers: common.NewTranslatorMap(otlp.NewTranslatorWithName(common.AppSignals, otlp.WithDataType(t.dataType))), + Processors: common.NewTranslatorMap(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType)), awsappsignals.NewTranslator(awsappsignals.WithDataType(t.dataType))), + Exporters: common.NewTranslatorMap[component.Config](), + Extensions: common.NewTranslatorMap[component.Config](), + } + + if t.dataType == component.DataTypeTraces { + translators.Exporters.Set(awsxray.NewTranslatorWithName(common.AppSignals)) + translators.Extensions.Set(awsproxy.NewTranslatorWithName(common.AppSignals)) + translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeTraces, []string{agenthealth.OperationPutTraceSegments})) + } else { + translators.Exporters.Set(awsemf.NewTranslatorWithName(common.AppSignals)) + 
translators.Extensions.Set(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})) + } + return translators, nil +} diff --git a/translator/translate/otel/pipeline/appsignals/translator_test.go b/translator/translate/otel/pipeline/appsignals/translator_test.go new file mode 100644 index 0000000000..2a250903a5 --- /dev/null +++ b/translator/translate/otel/pipeline/appsignals/translator_test.go @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package appsignals + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +func TestTranslatorTraces(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator(component.DataTypeTraces) + assert.EqualValues(t, "traces/app_signals", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithoutTracesCollectedKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsTraces)}, + }, + "WithAppSignalsEnabledTraces": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/app_signals"}, + processors: []string{"resourcedetection", "awsappsignals"}, + exporters: []string{"awsxray/app_signals"}, + extensions: []string{"awsproxy/app_signals", "agenthealth/traces"}, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} + +func TestTranslatorMetrics(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator(component.DataTypeMetrics) + assert.EqualValues(t, "metrics/app_signals", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithoutMetricsCollectedKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(common.AppSignalsMetrics)}, + }, + "WithAppSignalsEnabledMetrics": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + want: &want{ + receivers: []string{"otlp/app_signals"}, + processors: []string{"resourcedetection", "awsappsignals"}, + exporters: 
[]string{"awsemf/app_signals"}, + extensions: []string{"agenthealth/logs"}, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} diff --git a/translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml b/translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml new file mode 100644 index 0000000000..06bf8b38d1 --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/config_eks.yaml @@ -0,0 +1 @@ +resolvers: ["eks"] \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml b/translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml new file mode 100644 index 0000000000..7524660208 --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/config_generic.yaml @@ -0,0 +1 @@ +resolvers: ["generic"] \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json b/translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json new file mode 100644 index 0000000000..1c8bd7e378 --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/invalidRulesConfig.json @@ -0,0 +1,20 @@ +{ + "logs": { + "metrics_collected": { + "app_signals": { + "rules": [ + { + "selectors": [ + { + "dimension": "dimension1", + "match": "match1" + } + ], + "action": "replace", + "rule_name": "replace01" + } + ] + } + } + } +} diff --git a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json new file mode 100644 index 0000000000..7d6252542d --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfig.json @@ -0,0 +1,63 @@ +{ + "logs": { + "metrics_collected": { + "app_signals": { + "rules": [ + { + "selectors": [ + { + "dimension": "Operation", + "match": "POST *" + }, + { + "dimension": "RemoteService", + "match": "*" + } + ], + "action": "keep" + }, + { + "selectors": [ + { + "dimension": "Operation", + "match": "GET *" + }, + { + "dimension": "RemoteService", + "match": "*" + } + ], + "action": "keep", + "rule_name": "keep02" + }, + { + "selectors": [ + { + "dimension": "Operation", + "match": "POST *" + } + ], + "action": "drop", + "rule_name": "drop01" + }, + { + "selectors": [ + { + "dimension": "Operation", + "match": "*" + } + ], + "replacements": [ + { + "target_dimension": "RemoteOperation", + "value": "This is a test string" + } + ], + "action": "replace", + "rule_name": "replace01" + } + ] + } + } + } +} \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml new file mode 100644 
index 0000000000..0d69e235f6 --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigEKS.yaml @@ -0,0 +1,28 @@ +resolvers: ["eks"] +rules: + - selectors: + - dimension: Operation + match: "POST *" + - dimension: RemoteService + match: "*" + action: keep + - selectors: + - dimension: Operation + match: "GET *" + - dimension: RemoteService + match: "*" + action: keep + rule_name: "keep02" + - selectors: + - dimension: Operation + match: "POST *" + action: drop + rule_name: "drop01" + - selectors: + - dimension: Operation + match: "*" + replacements: + - target_dimension: RemoteOperation + value: "This is a test string" + action: replace + rule_name: "replace01" \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml new file mode 100644 index 0000000000..d3453e91ee --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/testdata/validRulesConfigGeneric.yaml @@ -0,0 +1,28 @@ +resolvers: ["generic"] +rules: + - selectors: + - dimension: Operation + match: "POST *" + - dimension: RemoteService + match: "*" + action: keep + - selectors: + - dimension: Operation + match: "GET *" + - dimension: RemoteService + match: "*" + action: keep + rule_name: "keep02" + - selectors: + - dimension: Operation + match: "POST *" + action: drop + rule_name: "drop01" + - selectors: + - dimension: Operation + match: "*" + replacements: + - target_dimension: RemoteOperation + value: "This is a test string" + action: replace + rule_name: "replace01" \ No newline at end of file diff --git a/translator/translate/otel/processor/awsappsignals/translator.go b/translator/translate/otel/processor/awsappsignals/translator.go new file mode 100644 index 0000000000..de8c5f1d2a --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/translator.go @@ -0,0 +1,128 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package awsappsignals + +import ( + _ "embed" + "errors" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/processor" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals/rules" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +type translator struct { + name string + dataType component.DataType + factory processor.Factory +} + +type Option interface { + apply(t *translator) +} + +type optionFunc func(t *translator) + +func (o optionFunc) apply(t *translator) { + o(t) +} + +// WithDataType determines where the translator should look to find +// the configuration. 
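+// For example, NewTranslator(WithDataType(component.DataTypeMetrics)) resolves its
+// settings and custom rules from the "logs::metrics_collected::app_signals" section,
+// while component.DataTypeTraces resolves them from "traces::traces_collected::app_signals"
+// (see common.AppSignalsConfigKeys).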
+func WithDataType(dataType component.DataType) Option { + return optionFunc(func(t *translator) { + t.dataType = dataType + }) +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator(opts ...Option) common.Translator[component.Config] { + t := &translator{factory: awsappsignals.NewFactory()} + for _, opt := range opts { + opt.apply(t) + } + return t +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + configKey := common.AppSignalsConfigKeys[t.dataType] + cfg := t.factory.CreateDefaultConfig().(*awsappsignals.Config) + if common.IsAppSignalsKubernetes() { + cfg.Resolvers = []string{"eks"} + } else { + cfg.Resolvers = []string{"generic"} + } + return t.translateCustomRules(conf, configKey, cfg) +} + +func (t *translator) translateCustomRules(conf *confmap.Conf, configKey string, cfg *awsappsignals.Config) (component.Config, error) { + var rulesList []rules.Rule + rulesConfigKey := common.ConfigKey(configKey, common.AppSignalsRules) + if conf.IsSet(rulesConfigKey) { + for _, rule := range conf.Get(rulesConfigKey).([]interface{}) { + ruleConfig := rules.Rule{} + ruleMap := rule.(map[string]interface{}) + selectors := ruleMap["selectors"].([]interface{}) + action := ruleMap["action"].(string) + + ruleConfig.Selectors = getServiceSelectors(selectors) + if ruleName, ok := ruleMap["rule_name"]; ok { + ruleConfig.RuleName = ruleName.(string) + } + + var err error + ruleConfig.Action, err = rules.GetAllowListAction(action) + if err != nil { + return nil, err + } + if ruleConfig.Action == rules.AllowListActionReplace { + replacements, ok := ruleMap["replacements"] + if !ok { + return nil, errors.New("replace action set, but no replacements defined for service rule") + } + ruleConfig.Replacements = getServiceReplacements(replacements) + } + + rulesList = append(rulesList, ruleConfig) + } + cfg.Rules = rulesList + } + + return cfg, nil +} + +func getServiceSelectors(selectorsList []interface{}) []rules.Selector { + var selectors []rules.Selector + for _, selector := range selectorsList { + selectorConfig := rules.Selector{} + selectorsMap := selector.(map[string]interface{}) + + selectorConfig.Dimension = selectorsMap["dimension"].(string) + selectorConfig.Match = selectorsMap["match"].(string) + selectors = append(selectors, selectorConfig) + } + return selectors +} + +func getServiceReplacements(replacementsList interface{}) []rules.Replacement { + var replacements []rules.Replacement + for _, replacement := range replacementsList.([]interface{}) { + replacementConfig := rules.Replacement{} + replacementMap := replacement.(map[string]interface{}) + + replacementConfig.TargetDimension = replacementMap["target_dimension"].(string) + replacementConfig.Value = replacementMap["value"].(string) + replacements = append(replacements, replacementConfig) + } + return replacements +} diff --git a/translator/translate/otel/processor/awsappsignals/translator_test.go b/translator/translate/otel/processor/awsappsignals/translator_test.go new file mode 100644 index 0000000000..217563e274 --- /dev/null +++ b/translator/translate/otel/processor/awsappsignals/translator_test.go @@ -0,0 +1,105 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsappsignals + +import ( + _ "embed" + "encoding/json" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsappsignals" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +var ( + //go:embed testdata/config_eks.yaml + validAppSignalsYamlEKS string + //go:embed testdata/config_generic.yaml + validAppSignalsYamlGeneric string + //go:embed testdata/validRulesConfig.json + validAppSignalsRulesConfig string + //go:embed testdata/validRulesConfigEKS.yaml + validAppSignalsRulesYamlEKS string + //go:embed testdata/validRulesConfigGeneric.yaml + validAppSignalsRulesYamlGeneric string + //go:embed testdata/invalidRulesConfig.json + invalidAppSignalsRulesConfig string +) + +func TestTranslate(t *testing.T) { + var validJsonMap, invalidJsonMap map[string]interface{} + json.Unmarshal([]byte(validAppSignalsRulesConfig), &validJsonMap) + json.Unmarshal([]byte(invalidAppSignalsRulesConfig), &invalidJsonMap) + + tt := NewTranslator(WithDataType(component.DataTypeMetrics)) + testCases := map[string]struct { + input map[string]interface{} + want string + wantErr error + isKubernetes bool + }{ + //The config for the awsappsignals processor is https://code.amazon.com/packages/AWSTracingSamplePetClinic/blobs/97ce3c409986ac8ae014de1e3fe71fdb98080f22/--/eks/appsignals/auto-instrumentation-new.yaml#L20 + //The awsappsignals processor config does not have a platform field, instead it gets added to resolvers when marshalled + "WithAppSignalsEnabledEKS": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: validAppSignalsYamlEKS, + isKubernetes: true, + }, + "WithAppSignalsCustomRulesEnabledEKS": { + input: validJsonMap, + want: validAppSignalsRulesYamlEKS, + isKubernetes: true, + }, + "WithAppSignalsEnabledGeneric": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: validAppSignalsYamlGeneric, + isKubernetes: false, + }, + "WithAppSignalsCustomRulesEnabledGeneric": { + input: validJsonMap, + want: validAppSignalsRulesYamlGeneric, + isKubernetes: false, + }, + "WithInvalidAppSignalsCustomRulesEnabled": { + input: invalidJsonMap, + wantErr: errors.New("replace action set, but no replacements defined for service rule"), + }, + } + factory := awsappsignals.NewFactory() + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.isKubernetes { + t.Setenv(common.KubernetesEnvVar, "TEST") + } + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*awsappsignals.Config) + require.True(t, ok) + wantCfg := factory.CreateDefaultConfig() + yamlConfig, err := common.GetYamlFileToYamlConfig(wantCfg, testCase.want) + require.NoError(t, err) + assert.Equal(t, yamlConfig.(*awsappsignals.Config), gotCfg) + } + }) + } +} diff --git a/translator/translate/otel/processor/resourcedetection/configs/config.yaml b/translator/translate/otel/processor/resourcedetection/configs/config.yaml new file mode 100644 index 0000000000..b957ade437 --- /dev/null +++ 
b/translator/translate/otel/processor/resourcedetection/configs/config.yaml @@ -0,0 +1,6 @@ +detectors: [eks, env, ec2] +override: true +timeout: 2s +ec2: + tags: + - ^kubernetes.io/cluster/.*$ \ No newline at end of file diff --git a/translator/translate/otel/processor/resourcedetection/translator.go b/translator/translate/otel/processor/resourcedetection/translator.go new file mode 100644 index 0000000000..9ac725b4dd --- /dev/null +++ b/translator/translate/otel/processor/resourcedetection/translator.go @@ -0,0 +1,61 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package resourcedetection + +import ( + _ "embed" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/processor" + + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +//go:embed configs/config.yaml +var appSignalsAwsResourceDetectionConfig string + +type translator struct { + name string + dataType component.DataType + factory processor.Factory +} + +type Option interface { + apply(t *translator) +} + +type optionFunc func(t *translator) + +func (o optionFunc) apply(t *translator) { + o(t) +} + +// WithDataType determines where the translator should look to find +// the configuration. +func WithDataType(dataType component.DataType) Option { + return optionFunc(func(t *translator) { + t.dataType = dataType + }) +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator(opts ...Option) common.Translator[component.Config] { + t := &translator{factory: resourcedetectionprocessor.NewFactory()} + for _, opt := range opts { + opt.apply(t) + } + return t +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*resourcedetectionprocessor.Config) + return common.GetYamlFileToYamlConfig(cfg, appSignalsAwsResourceDetectionConfig) +} diff --git a/translator/translate/otel/processor/resourcedetection/translator_test.go b/translator/translate/otel/processor/resourcedetection/translator_test.go new file mode 100644 index 0000000000..d84624937f --- /dev/null +++ b/translator/translate/otel/processor/resourcedetection/translator_test.go @@ -0,0 +1,60 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package resourcedetection + +import ( + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" +) + +func TestTranslate(t *testing.T) { + tt := NewTranslator(WithDataType(component.DataTypeTraces)) + testCases := map[string]struct { + input map[string]interface{} + want *confmap.Conf + wantErr error + }{ + "WithAppSignalsEnabled": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "detectors": []interface{}{ + "eks", + "env", + "ec2", + }, + "timeout": "2s", + "override": true, + "ec2": map[string]interface{}{ + "tags": []interface{}{"^kubernetes.io/cluster/.*$"}, + }, + }), + }, + } + factory := resourcedetectionprocessor.NewFactory() + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*resourcedetectionprocessor.Config) + require.True(t, ok) + wantCfg := factory.CreateDefaultConfig() + require.NoError(t, component.UnmarshalConfig(testCase.want, wantCfg)) + assert.Equal(t, wantCfg, gotCfg) + } + }) + } +} diff --git a/translator/translate/otel/receiver/otlp/appsignals_config.yaml b/translator/translate/otel/receiver/otlp/appsignals_config.yaml new file mode 100644 index 0000000000..560470c3a2 --- /dev/null +++ b/translator/translate/otel/receiver/otlp/appsignals_config.yaml @@ -0,0 +1,5 @@ +protocols: + grpc: + endpoint: 0.0.0.0:4315 + http: + endpoint: 0.0.0.0:4316 \ No newline at end of file diff --git a/translator/translate/otel/receiver/otlp/translator.go b/translator/translate/otel/receiver/otlp/translator.go index 01e57e3fa7..48c86c0fd3 100644 --- a/translator/translate/otel/receiver/otlp/translator.go +++ b/translator/translate/otel/receiver/otlp/translator.go @@ -4,6 +4,7 @@ package otlp import ( + _ "embed" "fmt" "go.opentelemetry.io/collector/component" @@ -23,6 +24,9 @@ var ( configKeys = map[component.DataType]string{ component.DataTypeTraces: common.ConfigKey(common.TracesKey, common.TracesCollectedKey, common.OtlpKey), } + + //go:embed appsignals_config.yaml + appSignalsConfig string ) type translator struct { @@ -59,11 +63,26 @@ func NewTranslator(opts ...Option) common.Translator[component.Config] { return t } +func NewTranslatorWithName(name string, opts ...Option) common.Translator[component.Config] { + t := &translator{name: name, factory: otlpreceiver.NewFactory()} + for _, opt := range opts { + opt.apply(t) + } + return t +} + func (t *translator) ID() component.ID { return component.NewIDWithName(t.factory.Type(), t.name) } func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*otlpreceiver.Config) + + // TODO: Should follow pattern done in awsemf and awsexray exporter translations (i.e should be integrated with standard otlp translation) + if t.name == common.AppSignals { + return common.GetYamlFileToYamlConfig(cfg, appSignalsConfig) + } + configKey, ok := configKeys[t.dataType] if !ok { return nil, fmt.Errorf("no config key defined for data type: %s", 
t.dataType) @@ -71,7 +90,6 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { if conf == nil || !conf.IsSet(configKey) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: configKey} } - cfg := t.factory.CreateDefaultConfig().(*otlpreceiver.Config) cfg.GRPC.NetAddr.Endpoint = defaultGrpcEndpoint cfg.HTTP.Endpoint = defaultHttpEndpoint if endpoint, ok := common.GetString(conf, common.ConfigKey(configKey, "grpc_endpoint")); ok { diff --git a/translator/translate/otel/receiver/otlp/translator_test.go b/translator/translate/otel/receiver/otlp/translator_test.go index 3eb8d89c23..06a6672a1e 100644 --- a/translator/translate/otel/receiver/otlp/translator_test.go +++ b/translator/translate/otel/receiver/otlp/translator_test.go @@ -74,3 +74,47 @@ func TestTracesTranslator(t *testing.T) { }) } } + +func TestTranslateAppSignals(t *testing.T) { + tt := NewTranslatorWithName(common.AppSignals, WithDataType(component.DataTypeTraces)) + testCases := map[string]struct { + input map[string]interface{} + want *confmap.Conf + wantErr error + }{ + "WithAppSignalsEnabledTraces": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }}, + want: confmap.NewFromStringMap(map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": map[string]interface{}{ + "endpoint": "0.0.0.0:4315", + }, + "http": map[string]interface{}{ + "endpoint": "0.0.0.0:4316", + }, + }, + }), + }, + } + factory := otlpreceiver.NewFactory() + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if err == nil { + require.NotNil(t, got) + gotCfg, ok := got.(*otlpreceiver.Config) + require.True(t, ok) + wantCfg := factory.CreateDefaultConfig() + require.NoError(t, component.UnmarshalConfig(testCase.want, wantCfg)) + assert.Equal(t, wantCfg, gotCfg) + } + }) + } +} diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index 979d128ec3..9e53cf1689 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -21,6 +21,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/appsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/emf_logs" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/host" @@ -66,6 +67,8 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { }) translators := common.NewTranslatorMap( + appsignals.NewTranslator(component.DataTypeTraces), + appsignals.NewTranslator(component.DataTypeMetrics), host.NewTranslator(common.PipelineNameHost, hostReceivers), host.NewTranslator(common.PipelineNameHostDeltaMetrics, deltaMetricsReceivers), containerinsights.NewTranslator(), diff --git a/translator/translate/otel/translate_otel_test.go b/translator/translate/otel/translate_otel_test.go index c8f4eb85b3..f63df7e725 100644 --- a/translator/translate/otel/translate_otel_test.go +++ 
b/translator/translate/otel/translate_otel_test.go @@ -46,6 +46,38 @@ func TestTranslator(t *testing.T) { }, }, }, + "WithAppSignalsMetricsEnabled": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + }, + "WithAppSignalsTracesEnabled": { + input: map[string]interface{}{ + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + }, + "WithAppSignalsMetricsAndTracesEnabled": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + "traces": map[string]interface{}{ + "traces_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{}, + }, + }, + }, + }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) {