diff --git a/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/__init__.py b/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/__init__.py index 0c816a86..45c7f2a2 100644 --- a/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/__init__.py +++ b/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/__init__.py @@ -36,6 +36,9 @@ # pylint: disable=no-name-in-module from google.protobuf.timestamp_pb2 import Timestamp +from opentelemetry.exporter.cloud_monitoring._resource import ( + get_monitored_resource, +) from opentelemetry.sdk.metrics.export import ( Gauge, Histogram, @@ -105,24 +108,6 @@ def __init__( self._exporter_start_time_nanos, ) = divmod(time_ns(), NANOS_PER_SECOND) - @staticmethod - def _get_monitored_resource( - _: Resource, - ) -> Optional[MonitoredResource]: - """Add Google resource specific information (e.g. instance id, region). - - See - https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources - for supported types - Args: - resource: OTel resource - """ - # TODO: implement new monitored resource mapping spec - return MonitoredResource( - type="generic_node", - labels={"location": "global", "namespace": "", "node_id": ""}, - ) - def _batch_write(self, series: List[TimeSeries]) -> None: """Cloud Monitoring allows writing up to 200 time series at once @@ -289,7 +274,7 @@ def export( all_series = [] for resource_metric in metrics_data.resource_metrics: - monitored_resource = self._get_monitored_resource( + monitored_resource = get_monitored_resource( resource_metric.resource ) for scope_metric in resource_metric.scope_metrics: diff --git a/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/_resource.py b/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/_resource.py new file mode 100644 index 00000000..35acf8a6 --- /dev/null +++ 
b/opentelemetry-exporter-gcp-monitoring/src/opentelemetry/exporter/cloud_monitoring/_resource.py @@ -0,0 +1,225 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import Dict, Optional, Tuple + +from google.api.monitored_resource_pb2 import MonitoredResource +from opentelemetry.sdk.resources import Attributes, Resource + + +# TODO: use opentelemetry-semantic-conventions package for these constants once it has +# stabilized. Right now, pinning an unstable version would cause dependency conflicts for +# users so these are copied in. 
+class ResourceAttributes:
+    AWS_EC2 = "aws_ec2"
+    CLOUD_ACCOUNT_ID = "cloud.account.id"
+    CLOUD_AVAILABILITY_ZONE = "cloud.availability_zone"
+    CLOUD_PLATFORM_KEY = "cloud.platform"
+    CLOUD_PROVIDER = "cloud.provider"
+    CLOUD_REGION = "cloud.region"
+    GCP_COMPUTE_ENGINE = "gcp_compute_engine"
+    GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine"
+    HOST_ID = "host.id"
+    HOST_NAME = "host.name"
+    K8S_CLUSTER_NAME = "k8s.cluster.name"
+    K8S_CONTAINER_NAME = "k8s.container.name"
+    K8S_NAMESPACE_NAME = "k8s.namespace.name"
+    K8S_NODE_NAME = "k8s.node.name"
+    K8S_POD_NAME = "k8s.pod.name"
+    SERVICE_INSTANCE_ID = "service.instance.id"
+    SERVICE_NAME = "service.name"
+    SERVICE_NAMESPACE = "service.namespace"
+
+
+AWS_ACCOUNT = "aws_account"
+AWS_EC2_INSTANCE = "aws_ec2_instance"
+CLUSTER_NAME = "cluster_name"
+CONTAINER_NAME = "container_name"
+GCE_INSTANCE = "gce_instance"
+GENERIC_NODE = "generic_node"
+GENERIC_TASK = "generic_task"
+INSTANCE_ID = "instance_id"
+JOB = "job"
+K8S_CLUSTER = "k8s_cluster"
+K8S_CONTAINER = "k8s_container"
+K8S_NODE = "k8s_node"
+K8S_POD = "k8s_pod"
+LOCATION = "location"
+NAMESPACE = "namespace"
+NAMESPACE_NAME = "namespace_name"
+NODE_ID = "node_id"
+NODE_NAME = "node_name"
+POD_NAME = "pod_name"
+REGION = "region"
+TASK_ID = "task_id"
+ZONE = "zone"
+
+
+class MapConfig:
+    otel_keys: Tuple[str, ...]
+    """
+    OTel resource keys to try and populate the resource label from. For entries with
+    multiple OTel resource keys, the keys are tried in order and the value of the first
+    key present in the resource attributes is used.
+    """
+
+    fallback: str
+    """If none of the otel_keys are present in the Resource, fall back to this literal value"""
+
+    def __init__(self, *otel_keys: str, fallback: str = ""):
+        self.otel_keys = otel_keys
+        self.fallback = fallback
+
+
+# Mappings of GCM resource label keys onto mapping config from OTel resource for a given
+# monitored resource type.
Copied from Go impl: +# https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/v1.8.0/internal/resourcemapping/resourcemapping.go#L51 +MAPPINGS = { + GCE_INSTANCE: { + ZONE: MapConfig(ResourceAttributes.CLOUD_AVAILABILITY_ZONE), + INSTANCE_ID: MapConfig(ResourceAttributes.HOST_ID), + }, + K8S_CONTAINER: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + ), + CLUSTER_NAME: MapConfig(ResourceAttributes.K8S_CLUSTER_NAME), + NAMESPACE_NAME: MapConfig(ResourceAttributes.K8S_NAMESPACE_NAME), + POD_NAME: MapConfig(ResourceAttributes.K8S_POD_NAME), + CONTAINER_NAME: MapConfig(ResourceAttributes.K8S_CONTAINER_NAME), + }, + K8S_POD: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + ), + CLUSTER_NAME: MapConfig(ResourceAttributes.K8S_CLUSTER_NAME), + NAMESPACE_NAME: MapConfig(ResourceAttributes.K8S_NAMESPACE_NAME), + POD_NAME: MapConfig(ResourceAttributes.K8S_POD_NAME), + }, + K8S_NODE: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + ), + CLUSTER_NAME: MapConfig(ResourceAttributes.K8S_CLUSTER_NAME), + NODE_NAME: MapConfig(ResourceAttributes.K8S_NODE_NAME), + }, + K8S_CLUSTER: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + ), + CLUSTER_NAME: MapConfig(ResourceAttributes.K8S_CLUSTER_NAME), + }, + AWS_EC2_INSTANCE: { + INSTANCE_ID: MapConfig(ResourceAttributes.HOST_ID), + REGION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + ), + AWS_ACCOUNT: MapConfig(ResourceAttributes.CLOUD_ACCOUNT_ID), + }, + GENERIC_TASK: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + fallback="global", + ), + NAMESPACE: MapConfig(ResourceAttributes.SERVICE_NAMESPACE), + JOB: MapConfig(ResourceAttributes.SERVICE_NAME), + TASK_ID: 
MapConfig(ResourceAttributes.SERVICE_INSTANCE_ID), + }, + GENERIC_NODE: { + LOCATION: MapConfig( + ResourceAttributes.CLOUD_AVAILABILITY_ZONE, + ResourceAttributes.CLOUD_REGION, + fallback="global", + ), + NAMESPACE: MapConfig(ResourceAttributes.SERVICE_NAMESPACE), + NODE_ID: MapConfig( + ResourceAttributes.HOST_ID, ResourceAttributes.HOST_NAME + ), + }, +} + + +def get_monitored_resource( + resource: Resource, +) -> Optional[MonitoredResource]: + """Add Google resource specific information (e.g. instance id, region). + + See + https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources + for supported types + Args: + resource: OTel resource + """ + + attrs = resource.attributes + + platform = attrs.get(ResourceAttributes.CLOUD_PLATFORM_KEY) + if platform == ResourceAttributes.GCP_COMPUTE_ENGINE: + mr = _create_monitored_resource(GCE_INSTANCE, attrs) + elif platform == ResourceAttributes.GCP_KUBERNETES_ENGINE: + if ResourceAttributes.K8S_CONTAINER_NAME in attrs: + mr = _create_monitored_resource(K8S_CONTAINER, attrs) + elif ResourceAttributes.K8S_POD_NAME in attrs: + mr = _create_monitored_resource(K8S_POD, attrs) + elif ResourceAttributes.K8S_NODE_NAME in attrs: + mr = _create_monitored_resource(K8S_NODE, attrs) + else: + mr = _create_monitored_resource(K8S_CLUSTER, attrs) + elif platform == ResourceAttributes.AWS_EC2: + mr = _create_monitored_resource(AWS_EC2_INSTANCE, attrs) + else: + # fallback to generic_task + if ( + ResourceAttributes.SERVICE_NAME in attrs + and ResourceAttributes.SERVICE_INSTANCE_ID in attrs + ): + mr = _create_monitored_resource(GENERIC_TASK, attrs) + else: + mr = _create_monitored_resource(GENERIC_NODE, attrs) + + return mr + + +def _create_monitored_resource( + monitored_resource_type: str, resource_attrs: Attributes +) -> MonitoredResource: + mapping = MAPPINGS[monitored_resource_type] + labels: Dict[str, str] = {} + + for mr_key, map_config in mapping.items(): + mr_value = None + for otel_key in 
map_config.otel_keys: + if otel_key in resource_attrs: + mr_value = resource_attrs[otel_key] + break + + if mr_value is None: + mr_value = map_config.fallback + + # OTel attribute values can be any of str, bool, int, float, or Sequence of any of + # them. Encode any non-strings as json string + if not isinstance(mr_value, str): + mr_value = json.dumps( + mr_value, sort_keys=True, indent=None, separators=(",", ":") + ) + labels[mr_key] = mr_value + + return MonitoredResource(type=monitored_resource_type, labels=labels) diff --git a/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_cloud_monitoring/test_with_resource.json b/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_cloud_monitoring/test_with_resource.json new file mode 100644 index 00000000..ae7f5b63 --- /dev/null +++ b/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_cloud_monitoring/test_with_resource.json @@ -0,0 +1,64 @@ +{ + "/google.monitoring.v3.MetricService/CreateMetricDescriptor": [ + { + "metricDescriptor": { + "description": "foo", + "displayName": "mycounter", + "labels": [ + { + "key": "string" + }, + { + "key": "int" + }, + { + "key": "float" + } + ], + "metricKind": "CUMULATIVE", + "type": "workload.googleapis.com/mycounter", + "valueType": "INT64" + }, + "name": "projects/fakeproject" + } + ], + "/google.monitoring.v3.MetricService/CreateTimeSeries": [ + { + "name": "projects/fakeproject", + "timeSeries": [ + { + "metric": { + "labels": { + "float": "123.4", + "int": "123", + "string": "string" + }, + "type": "workload.googleapis.com/mycounter" + }, + "metricKind": "CUMULATIVE", + "points": [ + { + "interval": { + "endTime": "str", + "startTime": "str" + }, + "value": { + "int64Value": "12" + } + } + ], + "resource": { + "labels": { + "cluster_name": "mycluster", + "container_name": "mycontainer", + "location": "myavailzone", + "namespace_name": "myns", + "pod_name": "mypod" + }, + "type": "k8s_container" + } + } + ] + } + ] +} diff --git 
a/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_resource.ambr b/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_resource.ambr new file mode 100644 index 00000000..4a3f2777 --- /dev/null +++ b/opentelemetry-exporter-gcp-monitoring/tests/__snapshots__/test_resource.ambr @@ -0,0 +1,206 @@ +# name: test_get_monitored_resource[aws ec2 region fallback] + dict({ + 'labels': dict({ + 'aws_account': 'myawsaccount', + 'instance_id': 'myhostid', + 'region': 'myregion', + }), + 'type': 'aws_ec2_instance', + }) +# --- +# name: test_get_monitored_resource[aws ec2] + dict({ + 'labels': dict({ + 'aws_account': 'myawsaccount', + 'instance_id': 'myhostid', + 'region': 'myavailzone', + }), + 'type': 'aws_ec2_instance', + }) +# --- +# name: test_get_monitored_resource[empty] + dict({ + 'labels': dict({ + 'location': 'global', + 'namespace': '', + 'node_id': '', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[fallback generic node] + dict({ + 'labels': dict({ + 'location': 'global', + 'namespace': '', + 'node_id': '', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[gce instance] + dict({ + 'labels': dict({ + 'instance_id': 'myhost', + 'zone': 'foo', + }), + 'type': 'gce_instance', + }) +# --- +# name: test_get_monitored_resource[generic node fallback global] + dict({ + 'labels': dict({ + 'location': 'global', + 'namespace': 'servicens', + 'node_id': 'hostid', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[generic node fallback host name] + dict({ + 'labels': dict({ + 'location': 'global', + 'namespace': 'servicens', + 'node_id': 'hostname', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[generic node fallback region] + dict({ + 'labels': dict({ + 'location': 'myregion', + 'namespace': 'servicens', + 'node_id': 'hostid', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[generic node] + dict({ + 
'labels': dict({ + 'location': 'myavailzone', + 'namespace': 'servicens', + 'node_id': 'hostid', + }), + 'type': 'generic_node', + }) +# --- +# name: test_get_monitored_resource[generic task fallback global] + dict({ + 'labels': dict({ + 'job': 'servicename', + 'location': 'global', + 'namespace': 'servicens', + 'task_id': 'serviceinstanceid', + }), + 'type': 'generic_task', + }) +# --- +# name: test_get_monitored_resource[generic task fallback region] + dict({ + 'labels': dict({ + 'job': 'servicename', + 'location': 'myregion', + 'namespace': 'servicens', + 'task_id': 'serviceinstanceid', + }), + 'type': 'generic_task', + }) +# --- +# name: test_get_monitored_resource[generic task] + dict({ + 'labels': dict({ + 'job': 'servicename', + 'location': 'myavailzone', + 'namespace': 'servicens', + 'task_id': 'serviceinstanceid', + }), + 'type': 'generic_task', + }) +# --- +# name: test_get_monitored_resource[k8s cluster region fallback] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myregion', + }), + 'type': 'k8s_cluster', + }) +# --- +# name: test_get_monitored_resource[k8s cluster] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myavailzone', + }), + 'type': 'k8s_cluster', + }) +# --- +# name: test_get_monitored_resource[k8s container region fallback] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'container_name': 'mycontainer', + 'location': 'myregion', + 'namespace_name': 'myns', + 'pod_name': 'mypod', + }), + 'type': 'k8s_container', + }) +# --- +# name: test_get_monitored_resource[k8s container] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'container_name': 'mycontainer', + 'location': 'myavailzone', + 'namespace_name': 'myns', + 'pod_name': 'mypod', + }), + 'type': 'k8s_container', + }) +# --- +# name: test_get_monitored_resource[k8s node region fallback] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myregion', + 'node_name': 'mynode', + }), + 'type': 
'k8s_node', + }) +# --- +# name: test_get_monitored_resource[k8s node] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myavailzone', + 'node_name': 'mynode', + }), + 'type': 'k8s_node', + }) +# --- +# name: test_get_monitored_resource[k8s pod region fallback] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myregion', + 'namespace_name': 'myns', + 'pod_name': 'mypod', + }), + 'type': 'k8s_pod', + }) +# --- +# name: test_get_monitored_resource[k8s pod] + dict({ + 'labels': dict({ + 'cluster_name': 'mycluster', + 'location': 'myavailzone', + 'namespace_name': 'myns', + 'pod_name': 'mypod', + }), + 'type': 'k8s_pod', + }) +# --- diff --git a/opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py b/opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py index 3c67f611..0156f730 100644 --- a/opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py +++ b/opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py @@ -42,6 +42,7 @@ ExplicitBucketHistogramAggregation, View, ) +from opentelemetry.sdk.resources import Resource from opentelemetry.util.types import Attributes PROJECT_ID = "fakeproject" @@ -215,3 +216,29 @@ def test_invalid_label_keys( counter.add(12, {"1some.invalid$\\key": "value"}) meter_provider.force_flush() assert gcmfake.get_calls() == snapshot_gcmcalls + + +# See additional tests in test_resource.py +def test_with_resource( + gcmfake_meter_provider: GcmFakeMeterProvider, + gcmfake: GcmFake, + snapshot_gcmcalls, +) -> None: + meter_provider = gcmfake_meter_provider( + resource=Resource.create( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.availability_zone": "myavailzone", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.pod.name": "mypod", + "k8s.container.name": "mycontainer", + }, + ) + ) + counter = meter_provider.get_meter(__name__).create_counter( + "mycounter", description="foo", unit="{myunit}" + ) + 
counter.add(12, LABELS) + meter_provider.force_flush() + assert gcmfake.get_calls() == snapshot_gcmcalls diff --git a/opentelemetry-exporter-gcp-monitoring/tests/test_resource.py b/opentelemetry-exporter-gcp-monitoring/tests/test_resource.py new file mode 100644 index 00000000..0a00e31f --- /dev/null +++ b/opentelemetry-exporter-gcp-monitoring/tests/test_resource.py @@ -0,0 +1,243 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from google.protobuf import json_format +from opentelemetry.exporter.cloud_monitoring._resource import ( + get_monitored_resource, +) +from opentelemetry.sdk.resources import Attributes, LabelValue, Resource +from syrupy.assertion import SnapshotAssertion + + +@pytest.mark.parametrize( + "otel_attributes", + [ + # GCE + pytest.param( + { + "cloud.platform": "gcp_compute_engine", + "cloud.availability_zone": "foo", + "host.id": "myhost", + }, + id="gce instance", + ), + # k8s container + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.availability_zone": "myavailzone", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.pod.name": "mypod", + "k8s.container.name": "mycontainer", + }, + id="k8s container", + ), + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.region": "myregion", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.pod.name": "mypod", + "k8s.container.name": "mycontainer", + }, + id="k8s 
container region fallback", + ), + # k8s pod + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.availability_zone": "myavailzone", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.pod.name": "mypod", + }, + id="k8s pod", + ), + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.region": "myregion", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.pod.name": "mypod", + }, + id="k8s pod region fallback", + ), + # k8s node + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.availability_zone": "myavailzone", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.node.name": "mynode", + }, + id="k8s node", + ), + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.region": "myregion", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + "k8s.node.name": "mynode", + }, + id="k8s node region fallback", + ), + # k8s cluster + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.availability_zone": "myavailzone", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + }, + id="k8s cluster", + ), + pytest.param( + { + "cloud.platform": "gcp_kubernetes_engine", + "cloud.region": "myregion", + "k8s.cluster.name": "mycluster", + "k8s.namespace.name": "myns", + }, + id="k8s cluster region fallback", + ), + # aws ec2 + pytest.param( + { + "cloud.platform": "aws_ec2", + "cloud.availability_zone": "myavailzone", + "host.id": "myhostid", + "cloud.account.id": "myawsaccount", + }, + id="aws ec2", + ), + pytest.param( + { + "cloud.platform": "aws_ec2", + "cloud.region": "myregion", + "host.id": "myhostid", + "cloud.account.id": "myawsaccount", + }, + id="aws ec2 region fallback", + ), + # generic task + pytest.param( + { + "cloud.availability_zone": "myavailzone", + "service.namespace": "servicens", + "service.name": "servicename", + "service.instance.id": "serviceinstanceid", + }, + 
id="generic task", + ), + pytest.param( + { + "cloud.region": "myregion", + "service.namespace": "servicens", + "service.name": "servicename", + "service.instance.id": "serviceinstanceid", + }, + id="generic task fallback region", + ), + pytest.param( + { + "service.namespace": "servicens", + "service.name": "servicename", + "service.instance.id": "serviceinstanceid", + }, + id="generic task fallback global", + ), + # generic node + pytest.param( + { + "cloud.availability_zone": "myavailzone", + "service.namespace": "servicens", + "service.name": "servicename", + "host.id": "hostid", + }, + id="generic node", + ), + pytest.param( + { + "cloud.region": "myregion", + "service.namespace": "servicens", + "service.name": "servicename", + "host.id": "hostid", + }, + id="generic node fallback region", + ), + pytest.param( + { + "service.namespace": "servicens", + "service.name": "servicename", + "host.id": "hostid", + }, + id="generic node fallback global", + ), + pytest.param( + { + "service.namespace": "servicens", + "service.name": "servicename", + "host.name": "hostname", + }, + id="generic node fallback host name", + ), + # fallback empty + pytest.param( + {"foo": "bar", "no.useful": "resourceattribs"}, + id="fallback generic node", + ), + pytest.param( + {}, + id="empty", + ), + ], +) +def test_get_monitored_resource( + otel_attributes: Attributes, snapshot: SnapshotAssertion +) -> None: + resource = Resource.create(otel_attributes) + monitored_resource = get_monitored_resource(resource) + + as_dict = ( + json_format.MessageToDict(monitored_resource) + if monitored_resource + else None + ) + assert as_dict == snapshot + + +@pytest.mark.parametrize( + ("value", "expect"), + [ + (None, ""), + (123, "123"), + (123.4, "123.4"), + ([1, 2, 3, 4], "[1,2,3,4]"), + ([1.1, 2.2, 3.3, 4.4], "[1.1,2.2,3.3,4.4]"), + (["a", "b", "c", "d"], '["a","b","c","d"]'), + ], +) +def test_non_string_values(value: LabelValue, expect: str): + # host.id will end up in generic_node's node_id 
label + monitored_resource = get_monitored_resource(Resource({"host.id": value})) + assert monitored_resource is not None + + value_as_gcm_label = monitored_resource.labels["node_id"] + assert value_as_gcm_label == expect