From b658113b50f91b13a5bac2b99c526a6ad8107854 Mon Sep 17 00:00:00 2001 From: Paul Van Eck Date: Tue, 16 Dec 2025 01:48:46 +0000 Subject: [PATCH] [Monitor] Generate exporter with TypeSpec Signed-off-by: Paul Van Eck --- .../exporter/_configuration/__init__.py | 67 +- .../exporter/_configuration/_state.py | 2 + .../exporter/_configuration/_utils.py | 45 +- .../exporter/_configuration/_worker.py | 12 +- .../exporter/_connection_string_parser.py | 13 +- .../opentelemetry/exporter/_constants.py | 15 +- .../exporter/_generated/__init__.py | 12 +- .../_generated/_azure_monitor_client.py | 92 - .../exporter/_generated/_metadata.json | 3 + .../exporter/_generated/_patch.py | 32 - .../exporter/_generated/_vendor.py | 16 - .../exporter/_generated/aio/__init__.py | 17 - .../_generated/aio/_azure_monitor_client.py | 74 - .../exporter/_generated/aio/_patch.py | 32 - .../_azure_monitor_client_operations.py | 102 - .../_generated/apiview-properties.json | 27 + .../exporter/_generated/exporter/__init__.py | 32 + .../exporter/_generated/exporter/_client.py | 97 + .../{ => exporter}/_configuration.py | 42 +- .../exporter/_operations/__init__.py | 23 + .../exporter/_operations/_operations.py | 203 ++ .../_generated/exporter/_operations/_patch.py | 21 + .../exporter/_generated/exporter/_patch.py | 21 + .../_utils}/__init__.py | 9 +- .../_generated/exporter/_utils/model_base.py | 1237 ++++++++++ .../exporter/_utils/serialization.py | 2030 +++++++++++++++++ .../_generated/exporter/_utils/utils.py | 25 + .../__init__.py => exporter/_version.py} | 8 +- .../_generated/exporter/aio/__init__.py | 29 + .../_generated/exporter/aio/_client.py | 99 + .../{ => exporter}/aio/_configuration.py | 24 +- .../exporter/aio/_operations/__init__.py | 23 + .../exporter/aio/_operations/_operations.py | 186 ++ .../exporter/aio/_operations/_patch.py | 21 + .../_generated/exporter/aio/_patch.py | 21 + .../_generated/exporter/models/__init__.py | 68 + .../models/_enums.py} | 46 +- 
.../_generated/exporter/models/_models.py | 995 ++++++++ .../_generated/exporter/models/_patch.py | 21 + .../_generated/{ => exporter}/py.typed | 0 .../exporter/_generated/models/__init__.py | 73 - .../exporter/_generated/models/_models.py | 1167 ---------- .../exporter/_generated/models/_models_py3.py | 1342 ----------- .../_azure_monitor_client_operations.py | 140 -- .../exporter/_generated/tsp-location.yaml | 4 + .../_performance_counters/__init__.py | 2 +- .../_performance_counters/_constants.py | 24 +- .../_performance_counters/_manager.py | 61 +- .../_performance_counters/_processor.py | 2 +- .../exporter/_quickpulse/_constants.py | 4 +- .../exporter/_quickpulse/_cpu.py | 1 + .../exporter/_quickpulse/_exporter.py | 8 +- .../exporter/_quickpulse/_live_metrics.py | 7 +- .../exporter/_quickpulse/_manager.py | 28 +- .../exporter/_quickpulse/_processor.py | 4 +- .../exporter/_quickpulse/_state.py | 2 + .../exporter/_quickpulse/_types.py | 3 +- .../exporter/_quickpulse/_utils.py | 5 +- .../exporter/_quickpulse/_validate.py | 12 +- .../opentelemetry/exporter/_storage.py | 27 +- .../monitor/opentelemetry/exporter/_utils.py | 21 +- .../opentelemetry/exporter/export/_base.py | 83 +- .../exporter/export/logs/_exporter.py | 36 +- .../exporter/export/logs/_processor.py | 9 +- .../exporter/export/metrics/_exporter.py | 21 +- .../exporter/export/trace/_exporter.py | 86 +- .../export/trace/_rate_limited_sampling.py | 11 +- .../exporter/export/trace/_sampling.py | 1 - .../exporter/export/trace/_utils.py | 38 +- .../exporter/statsbeat/__init__.py | 8 +- .../exporter/statsbeat/_manager.py | 48 +- .../exporter/statsbeat/_state.py | 13 +- .../exporter/statsbeat/_statsbeat.py | 6 +- .../exporter/statsbeat/_statsbeat_metrics.py | 3 + .../exporter/statsbeat/_utils.py | 14 +- .../exporter/statsbeat/customer/__init__.py | 2 +- .../statsbeat/customer/_customer_sdkstats.py | 1 + .../exporter/statsbeat/customer/_manager.py | 45 +- .../exporter/statsbeat/customer/_state.py | 8 +- 
.../exporter/statsbeat/customer/_utils.py | 72 +- .../samples/logs/sample_custom_event.py | 7 +- .../tests/configuration/test_manager.py | 298 ++- .../tests/configuration/test_utils.py | 236 +- .../tests/configuration/test_worker.py | 173 +- .../test_customer_sdkstats.py | 30 +- .../tests/customer_sdk_stats/test_manager.py | 267 ++- .../tests/customer_sdk_stats/test_utlities.py | 230 +- .../tests/logs/test_logs.py | 34 +- .../tests/logs/test_processor.py | 86 +- .../tests/metrics/test_metrics.py | 186 +- .../performance_counters/test_constants.py | 46 +- .../test_performance_counters.py | 249 +- .../performance_counters/test_processor.py | 42 +- .../tests/quickpulse/test_cpu.py | 1 + .../tests/quickpulse/test_filter.py | 28 +- .../tests/quickpulse/test_live_metrics.py | 150 +- .../tests/quickpulse/test_manager.py | 220 +- .../tests/quickpulse/test_processor.py | 3 +- .../tests/quickpulse/test_projection.py | 3 +- .../tests/quickpulse/test_types.py | 57 +- .../tests/statsbeat/test_manager.py | 303 ++- .../tests/statsbeat/test_metrics.py | 4 +- .../tests/statsbeat/test_statsbeat.py | 209 +- .../tests/test_base_customer_sdkstats.py | 141 +- .../tests/test_base_exporter.py | 214 +- .../tests/test_connection_string_parser.py | 72 +- .../tests/test_storage.py | 304 +-- .../tests/test_utils.py | 41 +- .../tests/trace/test_rate_limited_sampling.py | 307 ++- .../tests/trace/test_trace.py | 36 +- .../tests/trace/test_trace_utils.py | 52 +- .../tsp-location.yaml | 2 + 112 files changed, 7701 insertions(+), 5614 deletions(-) delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_azure_monitor_client.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_metadata.json delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_patch.py delete mode 100644 
sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_vendor.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/__init__.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_azure_monitor_client.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_patch.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/_azure_monitor_client_operations.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/apiview-properties.json create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/__init__.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_client.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{ => exporter}/_configuration.py (63%) create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/__init__.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/_operations.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/_patch.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_patch.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{operations => exporter/_utils}/__init__.py (65%) create mode 100644 
sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/model_base.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/serialization.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/utils.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{aio/operations/__init__.py => exporter/_version.py} (67%) create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/__init__.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_client.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{ => exporter}/aio/_configuration.py (67%) create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/__init__.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_operations.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_patch.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_patch.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/__init__.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{models/_azure_monitor_client_enums.py => exporter/models/_enums.py} (65%) create mode 100644 
sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_models.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_patch.py rename sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/{ => exporter}/py.typed (100%) delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/__init__.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models_py3.py delete mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/operations/_azure_monitor_client_operations.py create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/tsp-location.yaml create mode 100644 sdk/monitor/azure-monitor-opentelemetry-exporter/tsp-location.yaml diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/__init__.py index 75cd1982b4e9..99a9a1073ce0 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/__init__.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/__init__.py @@ -24,18 +24,19 @@ @dataclass class _ConfigurationState: """Immutable state object for configuration data.""" + etag: str = "" refresh_interval: int = _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS version_cache: int = -1 settings_cache: Dict[str, str] = field(default_factory=dict) - def 
with_updates(self, **kwargs) -> '_ConfigurationState': # pylint: disable=C4741,C4742 + def with_updates(self, **kwargs) -> "_ConfigurationState": # pylint: disable=C4741,C4742 """Create a new state object with updated values.""" return _ConfigurationState( - etag=kwargs.get('etag', self.etag), - refresh_interval=kwargs.get('refresh_interval', self.refresh_interval), - version_cache=kwargs.get('version_cache', self.version_cache), - settings_cache=kwargs.get('settings_cache', self.settings_cache.copy()) + etag=kwargs.get("etag", self.etag), + refresh_interval=kwargs.get("refresh_interval", self.refresh_interval), + version_cache=kwargs.get("version_cache", self.version_cache), + settings_cache=kwargs.get("settings_cache", self.settings_cache.copy()), ) @@ -96,16 +97,16 @@ def _is_transient_error(self, response: OneSettingsResponse) -> bool: # pylint: disable=too-many-statements, too-many-branches def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, str]] = None) -> int: """Fetch configuration from OneSettings and update local cache atomically. - + This method performs a conditional HTTP request to OneSettings using the current ETag for efficient caching. It atomically updates the local configuration state with any new settings and manages version tracking for change detection. - + When transient errors are encountered (timeouts, network exceptions, or HTTP status - codes 429, 500-504) from the CHANGE endpoint, the method doubles the current refresh - interval to reduce load on the failing service and returns immediately. The refresh + codes 429, 500-504) from the CHANGE endpoint, the method doubles the current refresh + interval to reduce load on the failing service and returns immediately. The refresh interval is capped at 24 hours (86,400 seconds) to prevent excessively long delays. - + The method implements a check-and-set pattern for thread safety: 1. Reads current state atomically to prepare request headers 2. 
Makes HTTP request to OneSettings CHANGE endpoint outside locks @@ -114,12 +115,12 @@ def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, 4. Re-reads current state to make version comparison decisions 5. Conditionally fetches from CONFIG endpoint if version increased 6. Updates all state fields atomically in a single operation - + Version comparison logic: - Version increase: New configuration available, fetches and caches new settings - Version same: No changes detected, ETag and refresh interval updated safely - Version decrease: Unexpected rollback state, logged as warning, no updates applied - + Error handling: - Transient errors (timeouts, exceptions, retryable HTTP codes) from CHANGE endpoint: Refresh interval doubled (capped), immediate return @@ -134,31 +135,31 @@ def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, :return: Updated refresh interval in seconds for the next configuration check. This value comes from the OneSettings response or is doubled (capped at 24 hours) - if transient errors are encountered from the CHANGE endpoint, determining how + if transient errors are encountered from the CHANGE endpoint, determining how frequently the background worker should call this method. :rtype: int - + Thread Safety: This method is thread-safe using atomic state updates. Multiple threads can call this method concurrently without data corruption. The implementation uses a single state lock with minimal critical sections to reduce lock contention. - + HTTP requests are performed outside locks to prevent blocking other threads during potentially slow network operations. - + Caching Behavior: The method automatically includes ETag headers for conditional requests to minimize unnecessary data transfer. If the server responds with 304 Not Modified, only the refresh interval is updated while preserving existing configuration. 
- + On CONFIG endpoint failures, the ETag is intentionally not updated to ensure the next request can retry fetching the same configuration version. - + State Consistency: All configuration state (ETag, refresh interval, version, settings) is updated atomically using immutable state objects. This prevents race conditions where different threads might observe inconsistent combinations of these values. - + Transient Error Handling: When transient errors are detected from the CHANGE endpoint (including timeouts, network exceptions, or retryable HTTP status codes), the refresh interval is @@ -198,9 +199,9 @@ def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, # Prepare new state updates new_state_updates = {} if response.etag is not None: - new_state_updates['etag'] = response.etag + new_state_updates["etag"] = response.etag if response.refresh_interval and response.refresh_interval > 0: # type: ignore - new_state_updates['refresh_interval'] = response.refresh_interval # type: ignore + new_state_updates["refresh_interval"] = response.refresh_interval # type: ignore if response.status_code == 304: # Not modified: Settings unchanged, but update etag and refresh interval if provided @@ -228,24 +229,26 @@ def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, if config_response.status_code == 200 and config_response.settings: # Validate that the versions from change and config match if config_response.version == response.version: - new_state_updates.update({ - 'version_cache': response.version, # type: ignore - 'settings_cache': config_response.settings # type: ignore - }) + new_state_updates.update( + { + "version_cache": response.version, # type: ignore + "settings_cache": config_response.settings, # type: ignore + } + ) else: - logger.warning("Version mismatch between change and config responses." \ - "No configurations updated.") + logger.warning( + "Version mismatch between change and config responses. 
No configurations updated." + ) # We do not update etag to allow retry on next call - new_state_updates.pop('etag', None) + new_state_updates.pop("etag", None) else: logger.warning("Unexpected response status: %d", config_response.status_code) # We do not update etag to allow retry on next call - new_state_updates.pop('etag', None) + new_state_updates.pop("etag", None) else: # No settings or version provided logger.warning("No settings or version provided in config response. Config not updated.") - notify_callbacks = False current_refresh_interval = _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS state_for_callbacks = None @@ -255,7 +258,7 @@ def get_configuration_and_refresh_interval(self, query_dict: Optional[Dict[str, latest_state = self._current_state # Always use latest state self._current_state = latest_state.with_updates(**new_state_updates) current_refresh_interval = self._current_state.refresh_interval - if 'settings_cache' in new_state_updates: + if "settings_cache" in new_state_updates: notify_callbacks = True state_for_callbacks = self._current_state @@ -270,7 +273,7 @@ def get_settings(self) -> Dict[str, str]: # pylint: disable=C4741,C4742 with self._state_lock: return self._current_state.settings_cache.copy() # type: ignore - def get_current_version(self) -> int: # type: ignore # pylint: disable=C4741,C4742 + def get_current_version(self) -> int: # type: ignore # pylint: disable=C4741,C4742 """Get current version.""" with self._state_lock: return self._current_state.version_cache # type: ignore diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_state.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_state.py index 19e997dda0c0..6e2ff3e2e928 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_state.py +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_state.py @@ -15,6 +15,7 @@ # Global singleton instance for easy access throughout the codebase _configuration_manager = None + def get_configuration_manager() -> Optional["_ConfigurationManager"]: """Get the global Configuration Manager singleton instance. @@ -30,5 +31,6 @@ def get_configuration_manager() -> Optional["_ConfigurationManager"]: global _configuration_manager # pylint: disable=global-statement if _configuration_manager is None: from azure.monitor.opentelemetry.exporter._configuration import _ConfigurationManager + _configuration_manager = _ConfigurationManager() return _configuration_manager diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_utils.py index 45cd99a0f0f6..91c792861e23 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_utils.py @@ -4,6 +4,7 @@ from typing import Dict, Optional, Any import json import logging + # mypy: disable-error-code="import-untyped" import requests @@ -18,6 +19,7 @@ class _ConfigurationProfile: """Profile for the current running SDK.""" + os: str = "" rp: str = "" attach: str = "" @@ -28,18 +30,18 @@ class _ConfigurationProfile: @classmethod def fill(cls, **kwargs) -> None: """Update only the class variables that are provided in kwargs and haven't been updated yet.""" - if 'os' in kwargs and cls.os == "": - cls.os = kwargs['os'] - if 'version' in kwargs and cls.version == "": - cls.version = kwargs['version'] - if 'component' in kwargs and cls.component == "": - cls.component = kwargs['component'] - if 'rp' in kwargs and cls.rp == "": - cls.rp = kwargs['rp'] - if 
'attach' in kwargs and cls.attach == "": - cls.attach = kwargs['attach'] - if 'region' in kwargs and cls.region == "": - cls.region = kwargs['region'] + if "os" in kwargs and cls.os == "": + cls.os = kwargs["os"] + if "version" in kwargs and cls.version == "": + cls.version = kwargs["version"] + if "component" in kwargs and cls.component == "": + cls.component = kwargs["component"] + if "rp" in kwargs and cls.rp == "": + cls.rp = kwargs["rp"] + if "attach" in kwargs and cls.attach == "": + cls.attach = kwargs["attach"] + if "region" in kwargs and cls.region == "": + cls.region = kwargs["region"] class OneSettingsResponse: @@ -64,7 +66,7 @@ def __init__( settings: Optional[Dict[str, str]] = None, version: Optional[int] = None, status_code: int = 200, - has_exception: bool = False + has_exception: bool = False, ): """Initialize OneSettingsResponse with configuration data. @@ -86,8 +88,9 @@ def __init__( self.has_exception = has_exception -def make_onesettings_request(url: str, query_dict: Optional[Dict[str, str]] = None, - headers: Optional[Dict[str, str]] = None) -> OneSettingsResponse: +def make_onesettings_request( + url: str, query_dict: Optional[Dict[str, str]] = None, headers: Optional[Dict[str, str]] = None +) -> OneSettingsResponse: """Make an HTTP request to the OneSettings API and parse the response. This function handles the complete OneSettings request lifecycle including: @@ -326,6 +329,7 @@ def _matches_override_rule(override_rule: Dict[str, Any]) -> bool: # All conditions in this rule matched return True + # pylint:disable=too-many-return-statements def _matches_condition(condition_key: str, condition_value: Any) -> bool: """Check if a specific condition matches the current configuration profile. 
@@ -458,14 +462,15 @@ def _parse_version_with_beta(version: str) -> tuple: :rtype: tuple """ # Check if version contains beta suffix - if 'b' in version: + if "b" in version: # Split on 'b' to separate base version and beta number - base_version, beta_part = version.split('b', 1) - base_parts = [int(x) for x in base_version.split('.')] + base_version, beta_part = version.split("b", 1) + base_parts = [int(x) for x in base_version.split(".")] beta_number = int(beta_part) if beta_part.isdigit() else 0 return tuple(base_parts + [beta_number]) # Release version - use infinity for beta part so it sorts after beta versions - base_parts = [int(x) for x in version.split('.')] - return tuple(base_parts + [float('inf')]) + base_parts = [int(x) for x in version.split(".")] + return tuple(base_parts + [float("inf")]) + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_worker.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_worker.py index a5a5705a709a..19e7dc197ec2 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_worker.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_configuration/_worker.py @@ -8,6 +8,7 @@ logger = logging.getLogger(__name__) + class _ConfigurationWorker: """Background worker thread for periodic configuration refresh from OneSettings. 
@@ -49,11 +50,7 @@ def __init__(self, configuration_manager, refresh_interval=None) -> None: self._lock = threading.Lock() # Single lock for all worker state self._shutdown_event = threading.Event() - self._refresh_thread = threading.Thread( - target=self._get_configuration, - name="ConfigurationWorker", - daemon=True - ) + self._refresh_thread = threading.Thread(target=self._get_configuration, name="ConfigurationWorker", daemon=True) self._refresh_interval = refresh_interval or self._default_refresh_interval self._shutdown_event.clear() self._refresh_thread.start() @@ -138,8 +135,9 @@ def _get_configuration(self) -> None: while not self._shutdown_event.is_set(): try: with self._lock: - self._refresh_interval = \ - self._configuration_manager.get_configuration_and_refresh_interval(_ONE_SETTINGS_PYTHON_TARGETING) + self._refresh_interval = self._configuration_manager.get_configuration_and_refresh_interval( + _ONE_SETTINGS_PYTHON_TARGETING + ) # Capture interval while we have the lock interval = self._refresh_interval except Exception as ex: # pylint: disable=broad-exception-caught diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_connection_string_parser.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_connection_string_parser.py index 8728415c8ba3..80c341ee8d9e 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_connection_string_parser.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_connection_string_parser.py @@ -54,26 +54,21 @@ def _initialize(self) -> None: # 3. Key from connection string in environment variable # 4. 
Key from instrumentation key in environment variable self.instrumentation_key = ( - code_cs.get(INSTRUMENTATION_KEY) or code_ikey or \ - env_cs.get(INSTRUMENTATION_KEY) or env_ikey # type: ignore + code_cs.get(INSTRUMENTATION_KEY) or code_ikey or env_cs.get(INSTRUMENTATION_KEY) or env_ikey # type: ignore ) # The priority of the endpoints is as follows: # 1. The endpoint explicitly passed in connection string # 2. The endpoint from the connection string in environment variable # 3. The default breeze endpoint self.endpoint = ( - code_cs.get(INGESTION_ENDPOINT) or env_cs.get(INGESTION_ENDPOINT) or \ - "https://dc.services.visualstudio.com" + code_cs.get(INGESTION_ENDPOINT) or env_cs.get(INGESTION_ENDPOINT) or "https://dc.services.visualstudio.com" ) self.live_endpoint = ( - code_cs.get(LIVE_ENDPOINT) or env_cs.get(LIVE_ENDPOINT) or \ - "https://rt.services.visualstudio.com" + code_cs.get(LIVE_ENDPOINT) or env_cs.get(LIVE_ENDPOINT) or "https://rt.services.visualstudio.com" ) # The AUDIENCE is a url that identifies Azure Monitor in a specific cloud # (For example: "https://monitor.azure.com/"). 
- self.aad_audience = ( - code_cs.get(AAD_AUDIENCE) or env_cs.get(AAD_AUDIENCE) # type: ignore - ) + self.aad_audience = code_cs.get(AAD_AUDIENCE) or env_cs.get(AAD_AUDIENCE) # type: ignore # Extract region information self.region = self._extract_region() # type: ignore diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_constants.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_constants.py index 05a6d26889a1..27f4d76defdd 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_constants.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_constants.py @@ -162,6 +162,7 @@ _APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL = "APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL" _CUSTOMER_SDKSTATS_LANGUAGE = "python" + class DropCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): CLIENT_READONLY = "CLIENT_READONLY" CLIENT_EXCEPTION = "CLIENT_EXCEPTION" @@ -169,20 +170,25 @@ class DropCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): CLIENT_STORAGE_DISABLED = "CLIENT_STORAGE_DISABLED" UNKNOWN = "UNKNOWN" + DropCodeType = Union[DropCode, int] + class RetryCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): CLIENT_EXCEPTION = "CLIENT_EXCEPTION" CLIENT_TIMEOUT = "CLIENT_TIMEOUT" UNKNOWN = "UNKNOWN" + RetryCodeType = Union[RetryCode, int] + class CustomerSdkStatsMetricName(str, Enum, metaclass=CaseInsensitiveEnumMeta): ITEM_SUCCESS_COUNT = "preview.item.success.count" ITEM_DROP_COUNT = "preview.item.dropped.count" ITEM_RETRY_COUNT = "preview.item.retry.count" + ## Map from Azure Monitor envelope base_types to TelemetryType _TYPE_MAP = { "EventData": _CUSTOM_EVENT, @@ -196,6 +202,7 @@ class CustomerSdkStatsMetricName(str, Enum, metaclass=CaseInsensitiveEnumMeta): "AvailabilityData": _AVAILABILITY, } + # Exception categories class _exception_categories(Enum): CLIENT_EXCEPTION = "Client exception" @@ -203,6 
+210,7 @@ class _exception_categories(Enum): NETWORK_EXCEPTION = "Network exception" TIMEOUT_EXCEPTION = "Timeout exception" + # Map RP names class _RP_Names(Enum): APP_SERVICE = "appsvc" @@ -211,6 +219,7 @@ class _RP_Names(Enum): VM = "vm" UNKNOWN = "unknown" + # Instrumentations # Special constant for azure-sdk opentelemetry instrumentation @@ -276,7 +285,7 @@ class _RP_Names(Enum): "openai_v2", "vertexai", # Instrumentations below this line have not been added to statsbeat report yet - _AZURE_AI_SDK_NAME + _AZURE_AI_SDK_NAME, ] _INSTRUMENTATIONS_BIT_MAP = {_INSTRUMENTATIONS_LIST[i]: _BASE**i for i in range(len(_INSTRUMENTATIONS_LIST))} @@ -317,8 +326,8 @@ class _RP_Names(Enum): _SAMPLE_RATE_KEY = "_MS.sampleRate" _SAMPLING_HASH = 5381 -_INT32_MAX: int = 2**31 - 1 # 2147483647 -_INT32_MIN: int = -2**31 # -2147483648 +_INT32_MAX: int = 2**31 - 1 # 2147483647 +_INT32_MIN: int = -(2**31) # -2147483648 # AAD Auth diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/__init__.py index 6d010691a414..0b929d3e8efb 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/__init__.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/__init__.py @@ -2,16 +2,6 @@ # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- - -from ._azure_monitor_client import AzureMonitorClient - -__all__ = ["AzureMonitorClient"] - -# `._patch.py` is used for handwritten extensions to the generated code -# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -from ._patch import patch_sdk - -patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_azure_monitor_client.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_azure_monitor_client.py deleted file mode 100644 index 6ea083bbd4c4..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_azure_monitor_client.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import TYPE_CHECKING - -from msrest import Deserializer, Serializer - -from azure.core import PipelineClient - -from . import models -from ._configuration import AzureMonitorClientConfiguration -from .operations import AzureMonitorClientOperationsMixin - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.rest import HttpRequest, HttpResponse - - -class AzureMonitorClient(AzureMonitorClientOperationsMixin): - """OpenTelemetry Exporter for Azure Monitor. - - :param host: Breeze endpoint: https://dc.services.visualstudio.com. 
Default value is - "https://dc.services.visualstudio.com". - :type host: str - """ - - def __init__( - self, - host="https://dc.services.visualstudio.com", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - _base_url = "{Host}/v2.1" - self._config = AzureMonitorClientConfiguration(host=host, **kwargs) - self._client = PipelineClient(base_url=_base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - - def _send_request( - self, - request, # type: HttpRequest - **kwargs # type: Any - ): - # type: (...) -> HttpResponse - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "Host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, **kwargs) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureMonitorClient - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_metadata.json b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_metadata.json new file mode 100644 index 000000000000..bdbed579ffb9 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_metadata.json @@ -0,0 +1,3 @@ +{ + "apiVersion": "v2.1" +} \ No newline at end of file diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_patch.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_patch.py deleted file mode 100644 index 17dbc073e01b..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_patch.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_vendor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_vendor.py deleted file mode 100644 index 0dafe0e287ff..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_vendor.py +++ /dev/null @@ -1,16 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.pipeline.transport import HttpRequest - - -def _convert_request(request, files=None): - data = request.content if not files else None - request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) - if files: - request.set_formdata_body(files) - return request diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/__init__.py deleted file mode 100644 index 6d010691a414..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._azure_monitor_client import AzureMonitorClient - -__all__ = ["AzureMonitorClient"] - -# `._patch.py` is used for handwritten extensions to the generated code -# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -from ._patch import patch_sdk - -patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_azure_monitor_client.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_azure_monitor_client.py deleted file mode 100644 index e829f0d44fa6..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_azure_monitor_client.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable - -from msrest import Deserializer, Serializer - -from azure.core import AsyncPipelineClient -from azure.core.rest import AsyncHttpResponse, HttpRequest - -from .. import models -from ._configuration import AzureMonitorClientConfiguration -from .operations import AzureMonitorClientOperationsMixin - - -class AzureMonitorClient(AzureMonitorClientOperationsMixin): - """OpenTelemetry Exporter for Azure Monitor. - - :param host: Breeze endpoint: https://dc.services.visualstudio.com. Default value is - "https://dc.services.visualstudio.com". 
- :type host: str - """ - - def __init__(self, host: str = "https://dc.services.visualstudio.com", **kwargs: Any) -> None: - _base_url = "{Host}/v2.1" - self._config = AzureMonitorClientConfiguration(host=host, **kwargs) - self._client = AsyncPipelineClient(base_url=_base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - - def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "Host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, **kwargs) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureMonitorClient": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_patch.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_patch.py deleted file mode 100644 index 17dbc073e01b..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_patch.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/_azure_monitor_client_operations.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/_azure_monitor_client_operations.py deleted file mode 100644 index 72c70dab39a3..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/_azure_monitor_client_operations.py +++ /dev/null @@ -1,102 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, List, Optional, TypeVar - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse -from azure.core.rest import HttpRequest - -from ... import models as _models -from ..._vendor import _convert_request -from ...operations._azure_monitor_client_operations import build_track_request - -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class AzureMonitorClientOperationsMixin: - - async def track(self, body: List["_models.TelemetryItem"], **kwargs: Any) -> "_models.TrackResponse": - """Track telemetry events. - - This operation sends a sequence of telemetry events that will be monitored by Azure Monitor. - - :param body: The list of telemetry events to track. 
- :type body: list[~azure_monitor_client.models.TelemetryItem] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: TrackResponse, or the result of cls(response) - :rtype: ~azure_monitor_client.models.TrackResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop("cls", None) # type: ClsType["_models.TrackResponse"] - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 400: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 402: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 429: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 500: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 503: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - } - error_map.update(kwargs.pop("error_map", {})) - - content_type = kwargs.pop("content_type", "application/json") # type: Optional[str] - - _json = self._serialize.body(body, "[TelemetryItem]") - - request = build_track_request( - content_type=content_type, - json=_json, - template_url=self.track.metadata["url"], - ) - request = _convert_request(request) - path_format_arguments = { - "Host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), - } - request.url = self._client.format_url(request.url, **path_format_arguments) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, stream=False, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise HttpResponseError(response=response) - - if response.status_code == 200: - deserialized = self._deserialize("TrackResponse", pipeline_response) - - if response.status_code == 206: - deserialized = self._deserialize("TrackResponse", pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - - track.metadata = {"url": "/track"} # type: ignore diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/apiview-properties.json b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/apiview-properties.json new file mode 100644 index 000000000000..40b451555a7a --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/apiview-properties.json @@ -0,0 +1,27 @@ +{ + "CrossLanguagePackageId": "AzureMonitorExporter", + "CrossLanguageDefinitionId": { + "exporter.models.MonitorDomain": "AzureMonitorExporter.Domain", + "exporter.models.MessageData": "AzureMonitorExporter.MessageData", + "exporter.models.MetricDataPoint": "AzureMonitorExporter.DataPoint", + "exporter.models.MetricsData": "AzureMonitorExporter.MetricsData", + "exporter.models.MonitorBase": "AzureMonitorExporter.Base", + "exporter.models.PageViewData": "AzureMonitorExporter.PageViewData", + "exporter.models.PageViewPerfData": "AzureMonitorExporter.PageViewPerfData", + "exporter.models.RemoteDependencyData": "AzureMonitorExporter.RemoteDependencyData", + "exporter.models.RequestData": "AzureMonitorExporter.RequestData", + "exporter.models.StackFrame": "AzureMonitorExporter.StackFrame", + "exporter.models.TelemetryErrorDetails": "AzureMonitorExporter.ErrorDetails", + "exporter.models.TelemetryEventData": "AzureMonitorExporter.EventData", + "exporter.models.TelemetryExceptionData": "AzureMonitorExporter.ExceptionData", + "exporter.models.TelemetryExceptionDetails": 
"AzureMonitorExporter.ExceptionDetails", + "exporter.models.TelemetryItem": "AzureMonitorExporter.TelemetryEnvelope", + "exporter.models.TrackResponse": "AzureMonitorExporter.TrackResponse", + "exporter.models.Versions": "AzureMonitorExporter.Versions", + "exporter.models.DataPointType": "AzureMonitorExporter.DataPointType", + "exporter.models.SeverityLevel": "AzureMonitorExporter.SeverityLevel", + "exporter.models.ContextTagKeys": "AzureMonitorExporter.ContextTagKeys", + "exporter.AzureMonitorExporterClient.track": "AzureMonitorExporter.track", + "exporter.aio.AzureMonitorExporterClient.track": "AzureMonitorExporter.track" + } +} \ No newline at end of file diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/__init__.py new file mode 100644 index 000000000000..0adaaf94ea13 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import AzureMonitorExporterClient # type: ignore +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureMonitorExporterClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_client.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_client.py new file mode 100644 index 000000000000..6b9d3d7bf928 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_client.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import AzureMonitorExporterClientConfiguration +from ._operations import _AzureMonitorExporterClientOperationsMixin +from ._utils.serialization import Deserializer, Serializer + + +class AzureMonitorExporterClient(_AzureMonitorExporterClientOperationsMixin): + """OpenTelemetry Exporter for Azure Monitor. + + :keyword host: Application Insights' Breeze endpoint. Default value is + "https://dc.services.visualstudio.com". + :paramtype host: str + :keyword api_version: The service API version. Known values are "v2.1" and None. Default value + is "v2.1". Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str or ~exporter.models.Versions + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, *, host: str = "https://dc.services.visualstudio.com", **kwargs: Any + ) -> None: + _endpoint = "{host}/{apiVersion}" + self._config = AzureMonitorExporterClientConfiguration(host=host, **kwargs) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, 
policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_configuration.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_configuration.py similarity index 63% rename from sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_configuration.py rename to 
sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_configuration.py index 8fa05dbefe89..1db247b14e50 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/_configuration.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_configuration.py @@ -2,57 +2,47 @@ # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING +from typing import Any -from azure.core.configuration import Configuration from azure.core.pipeline import policies -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any +from ._version import VERSION -VERSION = "unknown" - -class AzureMonitorClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureMonitorClient. +class AzureMonitorExporterClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AzureMonitorExporterClient. Note that all parameters used to create this instance are saved as instance attributes. - :param host: Breeze endpoint: https://dc.services.visualstudio.com. Default value is + :param host: Application Insights' Breeze endpoint. Default value is "https://dc.services.visualstudio.com". :type host: str + :keyword api_version: The service API version. Known values are "v2.1" and None. Default value + is "v2.1". 
Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str or ~exporter.models.Versions """ - def __init__( - self, - host="https://dc.services.visualstudio.com", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - super(AzureMonitorClientConfiguration, self).__init__(**kwargs) - if host is None: - raise ValueError("Parameter 'host' must not be None.") + def __init__(self, host: str = "https://dc.services.visualstudio.com", **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "v2.1") self.host = host - kwargs.setdefault("sdk_moniker", "azuremonitorclient/{}".format(VERSION)) + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "monitor-opentelemetry-exporter/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) - def _configure( - self, **kwargs # type: Any - ): - # type: (...) -> None + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") diff --git 
a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/__init__.py new file mode 100644 index 000000000000..39274f6e0168 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import _AzureMonitorExporterClientOperationsMixin # type: ignore # pylint: disable=unused-import + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/_operations.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/_operations.py new file mode 100644 index 000000000000..879d68bf9e0a --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_operations/_operations.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# NOTE(review): this span of the patch introduces three generated files; each
# is delimited below with a "--- file: ... ---" banner comment.
#
# --- file: exporter/_operations/_operations.py ---
from collections.abc import MutableMapping
from io import IOBase
import json
from typing import Any, Callable, IO, Optional, TypeVar, Union, overload

from azure.core import PipelineClient
from azure.core.exceptions import (
    ClientAuthenticationError,
    HttpResponseError,
    ResourceExistsError,
    ResourceNotFoundError,
    ResourceNotModifiedError,
    StreamClosedError,
    StreamConsumedError,
    map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.rest import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict

from .. import models as _models
from .._configuration import AzureMonitorExporterClientConfiguration
from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize
from .._utils.serialization import Serializer
from .._utils.utils import ClientMixinABC

JSON = MutableMapping[str, Any]
T = TypeVar("T")
# Shape of the optional ``cls`` response hook accepted by operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False


def build_azure_monitor_exporter_track_request(**kwargs: Any) -> HttpRequest:  # pylint: disable=name-too-long
    """Build the raw POST /track HTTP request (headers only; body is attached by the caller).

    :return: An :class:`~azure.core.rest.HttpRequest` for the track operation.
    :rtype: ~azure.core.rest.HttpRequest
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL — relative; the pipeline formats in the host later.
    _url = "/track"

    # Construct headers
    if content_type is not None:
        _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs)


class _AzureMonitorExporterClientOperationsMixin(
    ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], AzureMonitorExporterClientConfiguration]
):
    """Generated operations mixin; expects ``self._client``, ``self._config`` and
    ``self._serialize`` to be provided by the enclosing client class."""

    @overload
    def track(
        self, body: list[_models.TelemetryItem], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.TrackResponse:
        """Track telemetry events.

        :param body: The list of telemetry events to track. Required.
        :type body: list[~exporter.models.TelemetryItem]
        :keyword content_type: Content type of the JSON body. Default "application/json".
        :paramtype content_type: str
        :return: TrackResponse (MutableMapping-compatible).
        :rtype: ~exporter.models.TrackResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def track(
        self, body: list[JSON], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.TrackResponse:
        """Track telemetry events given as plain JSON dicts.

        :param body: The list of telemetry events to track. Required.
        :type body: list[JSON]
        :keyword content_type: Content type of the JSON body. Default "application/json".
        :paramtype content_type: str
        :return: TrackResponse (MutableMapping-compatible).
        :rtype: ~exporter.models.TrackResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def track(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.TrackResponse:
        """Track telemetry events given as a pre-serialized binary stream.

        :param body: The list of telemetry events to track. Required.
        :type body: IO[bytes]
        :keyword content_type: Content type of the binary body. Default "application/json".
        :paramtype content_type: str
        :return: TrackResponse (MutableMapping-compatible).
        :rtype: ~exporter.models.TrackResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def track(
        self, body: Union[list[_models.TelemetryItem], list[JSON], IO[bytes]], **kwargs: Any
    ) -> _models.TrackResponse:
        """Track telemetry events.

        Sends a sequence of telemetry events to be monitored by Azure Monitor.

        :param body: The telemetry events to track. One of: list[TelemetryItem],
         list[JSON], IO[bytes]. Required.
        :type body: list[~exporter.models.TelemetryItem] or list[JSON] or IO[bytes]
        :return: TrackResponse (MutableMapping-compatible).
        :rtype: ~exporter.models.TrackResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default azure-core status -> exception mapping; callers may override
        # entries via the ``error_map`` kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None))
        cls: ClsType[_models.TrackResponse] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _content = None
        if isinstance(body, (IOBase, bytes)):
            # Pre-serialized payload: pass through untouched.
            _content = body
        else:
            # Model/JSON payload: serialize, dropping read-only properties.
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_azure_monitor_exporter_track_request(
            content_type=content_type,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True),
            "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 206 is a success here: the ingestion endpoint accepts partial batches.
        if response.status_code not in [200, 206]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = None
            # NOTE(review): the error model is TrackResponse for every mapped
            # status — presumably the ingestion service returns the same
            # partial-success body shape on these errors; confirm against the
            # service TypeSpec. ``_failsafe_deserialize`` never raises.
            if response.status_code == 400:
                error = _failsafe_deserialize(_models.TrackResponse, response)
            elif response.status_code == 402:
                error = _failsafe_deserialize(_models.TrackResponse, response)
            elif response.status_code == 429:
                error = _failsafe_deserialize(_models.TrackResponse, response)
            elif response.status_code == 500:
                error = _failsafe_deserialize(_models.TrackResponse, response)
            elif response.status_code == 503:
                error = _failsafe_deserialize(_models.TrackResponse, response)
            raise HttpResponseError(response=response, model=error)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.TrackResponse, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore


# --- file: exporter/_operations/_patch.py ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
"""Customize generated code here.

Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""


__all__: list[str] = []  # Add all objects you want publicly available to users at this package level


def patch_sdk():
    """Do not remove from this file.

    `patch_sdk` is a last resort escape hatch that allows you to do customizations
    you can't accomplish using the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """


# --- file: exporter/_patch.py ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
"""Customize generated code here.

Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""


__all__: list[str] = []  # Add all objects you want publicly available to users at this package level


def patch_sdk():
    """Do not remove from this file.

    `patch_sdk` is a last resort escape hatch that allows you to do customizations
    you can't accomplish using the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """


# --- file: exporter/_utils/__init__.py (renamed from _generated/operations/__init__.py) ---
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# -------------------------------------------------------------------------- - -from ._azure_monitor_client_operations import AzureMonitorClientOperationsMixin - -__all__ = [ - "AzureMonitorClientOperationsMixin", -] diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/model_base.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/model_base.py new file mode 100644 index 000000000000..12926fa98dcf --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/model_base.py @@ -0,0 +1,1237 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from collections.abc import MutableMapping +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null +from azure.core.rest import HttpResponse + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 
'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = 
o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> 
datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. 
+ :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: 
typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): + def __init__(self, data: dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a 
view on D's values + :rtype: ~typing.ValuesView + """ + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ + return self._data.popitem() + + def clear(self) -> None: + """ + Remove all items from D. + """ + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ + """ + Updates D from mapping/iterable E and F. 
+ :param any args: Either a mapping object or an iterable of key-value pairs. + """ + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. + :rtype: any + """ + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds 
in the method above + pass + return o + + +def _get_rest_field(attr_to_rest_field: dict[str, "_RestField"], rest_name: str) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", 
False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + 
return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: 
_deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: list[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: list[typing.Any]) -> list[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) # type: ignore + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? 
+ try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() == "dict": + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() in ["list", "set", "tuple", "sequence"]: + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return 
functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + 
format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +def _failsafe_deserialize( + deserializer: typing.Any, + response: HttpResponse, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, response.json(), module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + response: HttpResponse, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, response.text()) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> 
typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[list[str]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. 
+ :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, list[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + 
xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return 
ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: list[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/serialization.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/serialization.py new file mode 100644 index 000000000000..45a3e44e45cb --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/serialization.py @@ -0,0 +1,2030 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. 
+ data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. + _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. 
+ :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + +TZ_UTC = datetime.timezone.utc + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Optional[dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. 
+ + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node. + + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. 
Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. 
+ + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer: # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, 
target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises SerializationError: if serialization fails. + :returns: The serialized data. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized |= target_obj.additional_properties + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. 
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises SerializationError: if serialization fails. 
+ :raises ValueError: if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. + :rtype: str, int, float, bool, dict, list + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + if data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." 
+ raise SerializationError(msg.format(data, data_type)) from err + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param obj data: Object to be serialized. + :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec # pylint: disable=eval-used + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param str data: Object to be serialized. + :rtype: str + :return: serialized object + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. 
+ + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + Defaults to False. + :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + 
el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. 
+ + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. 
+ + :param TimeDelta attr: Object to be serialized. + :rtype: str + :return: serialized duration + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises TypeError: if format invalid. + :return: serialized rfc + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises SerializationError: if format invalid. + :return: serialized iso + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." 
+ raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises SerializationError: if format invalid + :return: serialied unix + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc + + +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(list[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): + key = attr_desc["key"] + working_data = data + + while "." 
in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. 
+ + This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. + + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an 
attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer: + """Response object model deserializer. 
+ + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. + """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. 
+ :param str content_type: Swagger "produces" if available. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
+ if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. + :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. 
+ :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. + """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties # type: ignore + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. 
+ :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises TypeError: if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. 
+ + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. 
+ :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. 
+ :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :return: Deserialized duration + :rtype: TimeDelta + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :return: Deserialized date + :rtype: Date + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. 
+ :return: Deserialized time + :rtype: datetime.time + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. 
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. + :return: Deserialized datetime + :rtype: Datetime + :raises DeserializationError: if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." 
+ raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/utils.py new file mode 100644 index 000000000000..35c9c836f85f --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_utils/utils.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Generic, TYPE_CHECKING, TypeVar + +if TYPE_CHECKING: + from .serialization import Deserializer, Serializer + + +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") + + +class ClientMixinABC(ABC, Generic[TClient, TConfig]): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: TClient + _config: TConfig + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_version.py similarity index 67% rename from sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/__init__.py rename to sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_version.py index a1b6ce78bd5c..be71c81bd282 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/operations/__init__.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/_version.py @@ -2,12 +2,8 @@ # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._azure_monitor_client_operations import AzureMonitorClientOperationsMixin - -__all__ = [ - "AzureMonitorClientOperationsMixin", -] +VERSION = "1.0.0b1" diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/__init__.py new file mode 100644 index 000000000000..574512236a85 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import AzureMonitorExporterClient # type: ignore + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureMonitorExporterClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_client.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_client.py new file mode 100644 index 000000000000..5dfce8362b6d --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_client.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable +from typing_extensions import Self + +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._utils.serialization import Deserializer, Serializer +from ._configuration import AzureMonitorExporterClientConfiguration +from ._operations import _AzureMonitorExporterClientOperationsMixin + + +class AzureMonitorExporterClient(_AzureMonitorExporterClientOperationsMixin): + """OpenTelemetry Exporter for Azure Monitor. + + :keyword host: Application Insights' Breeze endpoint. Default value is + "https://dc.services.visualstudio.com". + :paramtype host: str + :keyword api_version: The service API version. Known values are "v2.1" and None. Default value + is "v2.1". Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str or ~exporter.models.Versions + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, *, host: str = "https://dc.services.visualstudio.com", **kwargs: Any + ) -> None: + _endpoint = "{host}/{apiVersion}" + self._config = AzureMonitorExporterClientConfiguration(host=host, **kwargs) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = 
AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_configuration.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_configuration.py similarity index 67% rename from 
sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_configuration.py rename to sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_configuration.py index 1bc76b30457b..e3652c2000fb 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/aio/_configuration.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_configuration.py @@ -2,36 +2,38 @@ # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any -from azure.core.configuration import Configuration from azure.core.pipeline import policies -VERSION = "unknown" +from .._version import VERSION -class AzureMonitorClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureMonitorClient. +class AzureMonitorExporterClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AzureMonitorExporterClient. Note that all parameters used to create this instance are saved as instance attributes. - :param host: Breeze endpoint: https://dc.services.visualstudio.com. Default value is + :param host: Application Insights' Breeze endpoint. Default value is "https://dc.services.visualstudio.com". :type host: str + :keyword api_version: The service API version. Known values are "v2.1" and None. Default value + is "v2.1". 
Note that overriding this default value may result in unsupported behavior. + :paramtype api_version: str or ~exporter.models.Versions """ def __init__(self, host: str = "https://dc.services.visualstudio.com", **kwargs: Any) -> None: - super(AzureMonitorClientConfiguration, self).__init__(**kwargs) - if host is None: - raise ValueError("Parameter 'host' must not be None.") + api_version: str = kwargs.pop("api_version", "v2.1") self.host = host - kwargs.setdefault("sdk_moniker", "azuremonitorclient/{}".format(VERSION)) + self.api_version = api_version + kwargs.setdefault("sdk_moniker", "monitor-opentelemetry-exporter/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) def _configure(self, **kwargs: Any) -> None: @@ -40,7 +42,7 @@ def _configure(self, **kwargs: Any) -> None: self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/__init__.py new file mode 100644 index 000000000000..39274f6e0168 --- /dev/null +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import _AzureMonitorExporterClientOperationsMixin # type: ignore # pylint: disable=unused-import + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_operations.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_operations.py new file mode 100644 index 000000000000..88c71f21b313 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_operations.py @@ -0,0 +1,186 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +import json +from typing import Any, Callable, IO, Optional, TypeVar, Union, overload + +from azure.core import AsyncPipelineClient +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._operations._operations import build_azure_monitor_exporter_track_request +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.utils import ClientMixinABC +from .._configuration import AzureMonitorExporterClientConfiguration + +JSON = MutableMapping[str, Any] +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] + + +class _AzureMonitorExporterClientOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], AzureMonitorExporterClientConfiguration] +): + + @overload + async def track( + self, body: list[_models.TelemetryItem], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.TrackResponse: + """Track telemetry events. + + This operation sends a sequence of telemetry events that will be monitored by + Azure Monitor. + + :param body: The list of telemetry events to track. Required. + :type body: list[~exporter.models.TelemetryItem] + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: TrackResponse. The TrackResponse is compatible with MutableMapping + :rtype: ~exporter.models.TrackResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def track( + self, body: list[JSON], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.TrackResponse: + """Track telemetry events. + + This operation sends a sequence of telemetry events that will be monitored by + Azure Monitor. + + :param body: The list of telemetry events to track. Required. + :type body: list[JSON] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: TrackResponse. The TrackResponse is compatible with MutableMapping + :rtype: ~exporter.models.TrackResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def track( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.TrackResponse: + """Track telemetry events. + + This operation sends a sequence of telemetry events that will be monitored by + Azure Monitor. + + :param body: The list of telemetry events to track. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: TrackResponse. The TrackResponse is compatible with MutableMapping + :rtype: ~exporter.models.TrackResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def track( + self, body: Union[list[_models.TelemetryItem], list[JSON], IO[bytes]], **kwargs: Any + ) -> _models.TrackResponse: + """Track telemetry events. + + This operation sends a sequence of telemetry events that will be monitored by + Azure Monitor. 
+ + :param body: The list of telemetry events to track. Is one of the following types: + [TelemetryItem], [JSON], IO[bytes] Required. + :type body: list[~exporter.models.TelemetryItem] or list[JSON] or IO[bytes] + :return: TrackResponse. The TrackResponse is compatible with MutableMapping + :rtype: ~exporter.models.TrackResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.TrackResponse] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_azure_monitor_exporter_track_request( + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), + "apiVersion": self._serialize.url("self._config.api_version", self._config.api_version, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = None + if response.status_code == 400: + error = _failsafe_deserialize(_models.TrackResponse, response) + elif response.status_code == 402: + error = _failsafe_deserialize(_models.TrackResponse, response) + elif response.status_code == 429: + error = _failsafe_deserialize(_models.TrackResponse, response) + elif response.status_code == 500: + error = _failsafe_deserialize(_models.TrackResponse, response) + elif response.status_code == 503: + error = _failsafe_deserialize(_models.TrackResponse, response) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TrackResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_patch.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_patch.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/aio/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/__init__.py new file mode 100644 index 000000000000..bcce0e6a4c1d --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/__init__.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + MessageData, + MetricDataPoint, + MetricsData, + MonitorBase, + MonitorDomain, + PageViewData, + PageViewPerfData, + RemoteDependencyData, + RequestData, + StackFrame, + TelemetryErrorDetails, + TelemetryEventData, + TelemetryExceptionData, + TelemetryExceptionDetails, + TelemetryItem, + TrackResponse, +) + +from ._enums import ( # type: ignore + ContextTagKeys, + DataPointType, + SeverityLevel, + Versions, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "MessageData", + "MetricDataPoint", + "MetricsData", + "MonitorBase", + "MonitorDomain", + "PageViewData", + "PageViewPerfData", + "RemoteDependencyData", + "RequestData", + "StackFrame", + "TelemetryErrorDetails", + "TelemetryEventData", + "TelemetryExceptionData", + "TelemetryExceptionDetails", + "TelemetryItem", + "TrackResponse", + "ContextTagKeys", + "DataPointType", + "SeverityLevel", + "Versions", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_azure_monitor_client_enums.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_enums.py similarity index 65% rename from sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_azure_monitor_client_enums.py rename to sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_enums.py index cb26c587c65b..45b959e58489 100644 --- 
a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_azure_monitor_client_enums.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_enums.py @@ -2,7 +2,7 @@ # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -14,47 +14,89 @@ class ContextTagKeys(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The context tag keys.""" AI_APPLICATION_VER = "ai.application.ver" + """Application version.""" AI_DEVICE_ID = "ai.device.id" + """Device ID.""" AI_DEVICE_LOCALE = "ai.device.locale" + """Device locale.""" AI_DEVICE_MODEL = "ai.device.model" + """Device model.""" AI_DEVICE_OEM_NAME = "ai.device.oemName" + """Device OEM name.""" AI_DEVICE_OS_VERSION = "ai.device.osVersion" + """Device OS version.""" AI_DEVICE_TYPE = "ai.device.type" + """Device type.""" AI_LOCATION_IP = "ai.location.ip" + """Location IP.""" AI_LOCATION_COUNTRY = "ai.location.country" + """Location country.""" AI_LOCATION_PROVINCE = "ai.location.province" + """Location province.""" AI_LOCATION_CITY = "ai.location.city" + """Location city.""" AI_OPERATION_ID = "ai.operation.id" + """Operation ID.""" AI_OPERATION_NAME = "ai.operation.name" + """Operation name.""" AI_OPERATION_PARENT_ID = "ai.operation.parentId" + """Operation parent ID.""" AI_OPERATION_SYNTHETIC_SOURCE = "ai.operation.syntheticSource" + """Operation synthetic source.""" AI_OPERATION_CORRELATION_VECTOR = "ai.operation.correlationVector" + """Operation correlation 
vector.""" AI_SESSION_ID = "ai.session.id" + """Session ID.""" AI_SESSION_IS_FIRST = "ai.session.isFirst" + """If session is the first one.""" AI_USER_ACCOUNT_ID = "ai.user.accountId" + """User account ID.""" AI_USER_ID = "ai.user.id" + """User ID.""" AI_USER_AUTH_USER_ID = "ai.user.authUserId" + """Authenticated user ID.""" AI_CLOUD_ROLE = "ai.cloud.role" + """Cloud role.""" AI_CLOUD_ROLE_VER = "ai.cloud.roleVer" + """Cloud role version.""" AI_CLOUD_ROLE_INSTANCE = "ai.cloud.roleInstance" + """Cloud role instance.""" AI_CLOUD_LOCATION = "ai.cloud.location" + """Cloud location.""" AI_INTERNAL_SDK_VERSION = "ai.internal.sdkVersion" + """Internal SDK version.""" AI_INTERNAL_AGENT_VERSION = "ai.internal.agentVersion" + """Internal agent version.""" AI_INTERNAL_NODE_NAME = "ai.internal.nodeName" + """Internal node name.""" class DataPointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of the metric data measurement.""" + """Type of the metric data.""" MEASUREMENT = "Measurement" + """Single measurement.""" AGGREGATION = "Aggregation" + """Aggregated value.""" class SeverityLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines the level of severity for the event.""" VERBOSE = "Verbose" + """Verbose level.""" INFORMATION = "Information" + """Information level.""" WARNING = "Warning" + """Warning level.""" ERROR = "Error" + """Error level.""" CRITICAL = "Critical" + """Critical level.""" + + +class Versions(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of Versions.""" + + V2_1 = "v2.1" + """The V2.1 API version.""" diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_models.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_models.py new file mode 100644 index 000000000000..048d8299325b --- /dev/null +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_models.py @@ -0,0 +1,995 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .._utils.model_base import Model as _Model, rest_field + +if TYPE_CHECKING: + from .. import models as _models + + +class MonitorDomain(_Model): + """The abstract common base of all domains. + + :ivar version: Schema version. Required. + :vartype version: int + """ + + version: int = rest_field(name="ver", visibility=["read", "create", "update", "delete", "query"]) + """Schema version. Required.""" + + @overload + def __init__( + self, + *, + version: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageData(MonitorDomain): + """Instances of Message represent printf-like trace statements that are + text-searched. Log4Net, NLog and other text-based log file entries are + translated into instances of this type. The message does not have measurements. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar message: Trace message. Required. + :vartype message: str + :ivar severity_level: Trace severity level. 
Known values are: "Verbose", "Information", + "Warning", "Error", and "Critical". + :vartype severity_level: str or ~exporter.models.SeverityLevel + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Trace message. Required.""" + severity_level: Optional[Union[str, "_models.SeverityLevel"]] = rest_field( + name="severityLevel", visibility=["read", "create", "update", "delete", "query"] + ) + """Trace severity level. Known values are: \"Verbose\", \"Information\", \"Warning\", \"Error\", + and \"Critical\".""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + message: str, + severity_level: Optional[Union[str, "_models.SeverityLevel"]] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MetricDataPoint(_Model): + """Metric data single measurement. + + :ivar namespace: Namespace of the metric. + :vartype namespace: str + :ivar name: Name of the metric. Required. + :vartype name: str + :ivar data_point_type: Metric type. Single measurement or the aggregated value. Known values + are: "Measurement" and "Aggregation". 
+ :vartype data_point_type: str or ~exporter.models.DataPointType + :ivar value: Single value for measurement. Sum of individual measurements for the + aggregation. Required. + :vartype value: float + :ivar count: Metric weight of the aggregated metric. Should not be set for a measurement. + :vartype count: int + :ivar min: Minimum value of the aggregated metric. Should not be set for a measurement. + :vartype min: float + :ivar max: Maximum value of the aggregated metric. Should not be set for a measurement. + :vartype max: float + :ivar std_dev: Standard deviation of the aggregated metric. Should not be set for a + measurement. + :vartype std_dev: float + """ + + namespace: Optional[str] = rest_field(name="ns", visibility=["read", "create", "update", "delete", "query"]) + """Namespace of the metric.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the metric. Required.""" + data_point_type: Optional[Union[str, "_models.DataPointType"]] = rest_field( + name="kind", visibility=["read", "create", "update", "delete", "query"] + ) + """Metric type. Single measurement or the aggregated value. Known values are: \"Measurement\" and + \"Aggregation\".""" + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Single value for measurement. Sum of individual measurements for the + aggregation. Required.""" + count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Metric weight of the aggregated metric. Should not be set for a measurement.""" + min: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Minimum value of the aggregated metric. Should not be set for a measurement.""" + max: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Maximum value of the aggregated metric. 
Should not be set for a measurement.""" + std_dev: Optional[float] = rest_field(name="stdDev", visibility=["read", "create", "update", "delete", "query"]) + """Standard deviation of the aggregated metric. Should not be set for a + measurement.""" + + @overload + def __init__( + self, + *, + name: str, + value: float, + namespace: Optional[str] = None, + data_point_type: Optional[Union[str, "_models.DataPointType"]] = None, + count: Optional[int] = None, + min: Optional[float] = None, # pylint: disable=redefined-builtin + max: Optional[float] = None, # pylint: disable=redefined-builtin + std_dev: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MetricsData(MonitorDomain): + """An instance of the Metric item is a list of measurements (single data points) + and/or aggregations. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar metrics: List of metrics. Only one metric in the list is currently supported by + Application Insights storage. If multiple data points were sent only the first + one will be used. Required. + :vartype metrics: list[~exporter.models.MetricDataPoint] + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + """ + + metrics: list["_models.MetricDataPoint"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of metrics. Only one metric in the list is currently supported by + Application Insights storage. If multiple data points were sent only the first + one will be used. 
Required.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + + @overload + def __init__( + self, + *, + version: int, + metrics: list["_models.MetricDataPoint"], + properties: Optional[dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MonitorBase(_Model): + """Data struct to contain only C section with custom fields. + + :ivar base_type: Name of item (B section) if any. If telemetry data is derived straight from + this, this should be null. + :vartype base_type: str + :ivar base_data: The data payload for the telemetry request. + :vartype base_data: ~exporter.models.MonitorDomain + """ + + base_type: Optional[str] = rest_field(name="baseType", visibility=["read", "create", "update", "delete", "query"]) + """Name of item (B section) if any. If telemetry data is derived straight from + this, this should be null.""" + base_data: Optional["_models.MonitorDomain"] = rest_field( + name="baseData", visibility=["read", "create", "update", "delete", "query"] + ) + """The data payload for the telemetry request.""" + + @overload + def __init__( + self, + *, + base_type: Optional[str] = None, + base_data: Optional["_models.MonitorDomain"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PageViewData(MonitorDomain): + """An instance of PageView represents a generic action on a page like a button + click. It is also the base type for PageView. 
+ + :ivar version: Schema version. Required. + :vartype version: int + :ivar id: Identifier of a page view instance. Used for correlation between page view and + other telemetry items. Required. + :vartype id: str + :ivar name: Event name. Keep it low cardinality to allow proper grouping and useful metrics. + Required. + :vartype name: str + :ivar url: Request URL with all query string parameters. + :vartype url: str + :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), + this is the duration. For a page view with performance information + (PageViewPerfData), this is the page load time. Must be less than 1000 days. + :vartype duration: str + :ivar referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave + blank. + :vartype referred_uri: str + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Identifier of a page view instance. Used for correlation between page view and + other telemetry items. Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Event name. Keep it low cardinality to allow proper grouping and useful metrics. Required.""" + url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request URL with all query string parameters.""" + duration: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), + this is the duration. For a page view with performance information + (PageViewPerfData), this is the page load time. 
Must be less than 1000 days.""" + referred_uri: Optional[str] = rest_field( + name="referredUri", visibility=["read", "create", "update", "delete", "query"] + ) + """Fully qualified page URI or URL of the referring page; if unknown, leave blank.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + id: str, # pylint: disable=redefined-builtin + name: str, + url: Optional[str] = None, + duration: Optional[str] = None, + referred_uri: Optional[str] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PageViewPerfData(MonitorDomain): + """An instance of PageViewPerf represents: a page view with no performance data, a + page view with performance data, or just the performance data of an earlier + page request. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar id: Identifier of a page view instance. Used for correlation between page view and + other telemetry items. Required. + :vartype id: str + :ivar name: Event name. Keep it low cardinality to allow proper grouping and useful metrics. + Required. + :vartype name: str + :ivar url: Request URL with all query string parameters. + :vartype url: str + :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), + this is the duration. 
For a page view with performance information + (PageViewPerfData), this is the page load time. Must be less than 1000 days. + :vartype duration: str + :ivar perf_total: Performance total in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff. + :vartype perf_total: str + :ivar network_connect: Network connection time in TimeSpan 'G' (general long) format: + d:hh:mm:ss.fffffff. + :vartype network_connect: str + :ivar sent_request: Sent request time in TimeSpan 'G' (general long) format: + d:hh:mm:ss.fffffff. + :vartype sent_request: str + :ivar received_response: Received response time in TimeSpan 'G' (general long) format: + d:hh:mm:ss.fffffff. + :vartype received_response: str + :ivar dom_processing: DOM processing time in TimeSpan 'G' (general long) format: + d:hh:mm:ss.fffffff. + :vartype dom_processing: str + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Identifier of a page view instance. Used for correlation between page view and + other telemetry items. Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Event name. Keep it low cardinality to allow proper grouping and useful metrics. Required.""" + url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request URL with all query string parameters.""" + duration: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), + this is the duration. For a page view with performance information + (PageViewPerfData), this is the page load time. 
Must be less than 1000 days.""" + perf_total: Optional[str] = rest_field(name="perfTotal", visibility=["read", "create", "update", "delete", "query"]) + """Performance total in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff.""" + network_connect: Optional[str] = rest_field( + name="networkConnect", visibility=["read", "create", "update", "delete", "query"] + ) + """Network connection time in TimeSpan 'G' (general long) format: + d:hh:mm:ss.fffffff.""" + sent_request: Optional[str] = rest_field( + name="sentRequest", visibility=["read", "create", "update", "delete", "query"] + ) + """Sent request time in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff.""" + received_response: Optional[str] = rest_field( + name="receivedResponse", visibility=["read", "create", "update", "delete", "query"] + ) + """Received response time in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff.""" + dom_processing: Optional[str] = rest_field( + name="domProcessing", visibility=["read", "create", "update", "delete", "query"] + ) + """DOM processing time in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + id: str, # pylint: disable=redefined-builtin + name: str, + url: Optional[str] = None, + duration: Optional[str] = None, + perf_total: Optional[str] = None, + network_connect: Optional[str] = None, + sent_request: Optional[str] = None, + received_response: Optional[str] = None, + dom_processing: Optional[str] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RemoteDependencyData(MonitorDomain): + """An instance of Remote Dependency represents an interaction of the monitored + component with a remote component/service like SQL or an HTTP endpoint. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar id: Identifier of a dependency call instance. Used for correlation with the request + telemetry item corresponding to this dependency call. + :vartype id: str + :ivar name: Name of the command initiated with this dependency call. Low cardinality value. + Examples are stored procedure name and URL path template. Required. + :vartype name: str + :ivar result_code: Result code of a dependency call. Examples are SQL error code and HTTP + status + code. + :vartype result_code: str + :ivar data: Command initiated by this dependency call. Examples are SQL statement and HTTP + URL with all query parameters. + :vartype data: str + :ivar type: Dependency type name. Very low cardinality value for logical grouping of + dependencies and interpretation of other fields like commandName and + resultCode. Examples are SQL, Azure table, and HTTP. + :vartype type: str + :ivar target: Target site of a dependency call. Examples are server name, host address. + :vartype target: str + :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. + Required. + :vartype duration: str + :ivar success: Indication of successful or unsuccessful call. + :vartype success: bool + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. 
+ :vartype measurements: dict[str, float] + """ + + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Identifier of a dependency call instance. Used for correlation with the request + telemetry item corresponding to this dependency call.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the command initiated with this dependency call. Low cardinality value. + Examples are stored procedure name and URL path template. Required.""" + result_code: Optional[str] = rest_field( + name="resultCode", visibility=["read", "create", "update", "delete", "query"] + ) + """Result code of a dependency call. Examples are SQL error code and HTTP status + code.""" + data: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Command initiated by this dependency call. Examples are SQL statement and HTTP + URL with all query parameters.""" + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Dependency type name. Very low cardinality value for logical grouping of + dependencies and interpretation of other fields like commandName and + resultCode. Examples are SQL, Azure table, and HTTP.""" + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Target site of a dependency call. Examples are server name, host address.""" + duration: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. 
Required.""" + success: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Indication of successful or unsuccessful call.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + name: str, + duration: str, + id: Optional[str] = None, # pylint: disable=redefined-builtin + result_code: Optional[str] = None, + data: Optional[str] = None, + type: Optional[str] = None, + target: Optional[str] = None, + success: Optional[bool] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RequestData(MonitorDomain): + """An instance of Request represents completion of an external request to the + application to do work and contains a summary of that request execution and the + results. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar id: Identifier of a request call instance. Used for correlation between request and + other telemetry items. Required. + :vartype id: str + :ivar name: Name of the request. Represents code path taken to process request. Low + cardinality value to allow better grouping of requests. For HTTP requests it + represents the HTTP method and URL path template like 'GET /values/{id}'. + :vartype name: str + :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. + Required. 
+ :vartype duration: str + :ivar success: Indication of successful or unsuccessful call. Required. + :vartype success: bool + :ivar response_code: Result of a request execution. HTTP status code for HTTP requests. + Required. + :vartype response_code: str + :ivar source: Source of the request. Examples are the instrumentation key of the caller or + the ip address of the caller. + :vartype source: str + :ivar url: Request URL with all query string parameters. + :vartype url: str + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Identifier of a request call instance. Used for correlation between request and + other telemetry items. Required.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the request. Represents code path taken to process request. Low + cardinality value to allow better grouping of requests. For HTTP requests it + represents the HTTP method and URL path template like 'GET /values/{id}'.""" + duration: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. Required.""" + success: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Indication of successful or unsuccessful call. Required.""" + response_code: str = rest_field(name="responseCode", visibility=["read", "create", "update", "delete", "query"]) + """Result of a request execution. HTTP status code for HTTP requests. Required.""" + source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Source of the request. 
Examples are the instrumentation key of the caller or + the ip address of the caller.""" + url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Request URL with all query string parameters.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + id: str, # pylint: disable=redefined-builtin + duration: str, + success: bool, + response_code: str, + name: Optional[str] = None, + source: Optional[str] = None, + url: Optional[str] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class StackFrame(_Model): + """Stack frame information. + + :ivar level: Level in the stack. Required. + :vartype level: int + :ivar method: Method name. Required. + :vartype method: str + :ivar assembly: Name of the assembly (dll, jar, etc.) containing this function. + :vartype assembly: str + :ivar file_name: File name or URL of the method implementation. + :vartype file_name: str + :ivar line: Line number of the code implementation. + :vartype line: int + """ + + level: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Level in the stack. Required.""" + method: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Method name. 
Required.""" + assembly: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the assembly (dll, jar, etc.) containing this function.""" + file_name: Optional[str] = rest_field(name="fileName", visibility=["read", "create", "update", "delete", "query"]) + """File name or URL of the method implementation.""" + line: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Line number of the code implementation.""" + + @overload + def __init__( + self, + *, + level: int, + method: str, + assembly: Optional[str] = None, + file_name: Optional[str] = None, + line: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TelemetryErrorDetails(_Model): + """The error details. + + :ivar index: The index in the original payload of the item. + :vartype index: int + :ivar status_code: The item specific [HTTP Response status code](#Response Status Codes). + :vartype status_code: int + :ivar message: The error message. + :vartype message: str + """ + + index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index in the original payload of the item.""" + status_code: Optional[int] = rest_field( + name="statusCode", visibility=["read", "create", "update", "delete", "query"] + ) + """The item specific [HTTP Response status code](#Response Status Codes).""" + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error message.""" + + @overload + def __init__( + self, + *, + index: Optional[int] = None, + status_code: Optional[int] = None, + message: Optional[str] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TelemetryEventData(MonitorDomain): + """Instances of Event represent structured event records that can be grouped and + searched by their properties. Event data item also creates a metric of event + count by name. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar name: Event name. Keep it low cardinality to allow proper grouping and useful metrics. + Required. + :vartype name: str + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Event name. Keep it low cardinality to allow proper grouping and useful metrics. Required.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + name: str, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TelemetryExceptionData(MonitorDomain): + """An instance of Exception represents a handled or unhandled exception that + occurred during execution of the monitored application. + + :ivar version: Schema version. Required. + :vartype version: int + :ivar exceptions: Exception chain - list of inner exceptions. Required. + :vartype exceptions: list[~exporter.models.TelemetryExceptionDetails] + :ivar severity_level: Severity level. Mostly used to indicate exception severity level when it + is + reported by logging library. Known values are: "Verbose", "Information", "Warning", "Error", + and "Critical". + :vartype severity_level: str or ~exporter.models.SeverityLevel + :ivar problem_id: Identifier of where the exception was thrown in code. Used for exceptions + grouping. Typically a combination of exception type and a function from the + call stack. + :vartype problem_id: str + :ivar properties: Collection of custom properties. + :vartype properties: dict[str, str] + :ivar measurements: Collection of custom measurements. + :vartype measurements: dict[str, float] + """ + + exceptions: list["_models.TelemetryExceptionDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Exception chain - list of inner exceptions. Required.""" + severity_level: Optional[Union[str, "_models.SeverityLevel"]] = rest_field( + name="severityLevel", visibility=["read", "create", "update", "delete", "query"] + ) + """Severity level. Mostly used to indicate exception severity level when it is + reported by logging library. Known values are: \"Verbose\", \"Information\", \"Warning\", + \"Error\", and \"Critical\".""" + problem_id: Optional[str] = rest_field(name="problemId", visibility=["read", "create", "update", "delete", "query"]) + """Identifier of where the exception was thrown in code. 
Used for exceptions + grouping. Typically a combination of exception type and a function from the + call stack.""" + properties: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom properties.""" + measurements: Optional[dict[str, float]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collection of custom measurements.""" + + @overload + def __init__( + self, + *, + version: int, + exceptions: list["_models.TelemetryExceptionDetails"], + severity_level: Optional[Union[str, "_models.SeverityLevel"]] = None, + problem_id: Optional[str] = None, + properties: Optional[dict[str, str]] = None, + measurements: Optional[dict[str, float]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TelemetryExceptionDetails(_Model): + """Exception details of the exception in a chain. + + :ivar id: In case exception is nested (outer exception contains inner one), the id and + outerId properties are used to represent the nesting. + :vartype id: int + :ivar outer_id: The value of outerId is a reference to an element in ExceptionDetails that + represents the outer exception. + :vartype outer_id: int + :ivar type_name: Exception type name. + :vartype type_name: str + :ivar message: Exception message. Required. + :vartype message: str + :ivar has_full_stack: Indicates if full exception stack is provided in the exception. The stack + may + be trimmed, such as in the case of a StackOverflow exception. + :vartype has_full_stack: bool + :ivar stack: Text describing the stack. Either stack or parsedStack should have a value. + :vartype stack: str + :ivar parsed_stack: List of stack frames. Either stack or parsedStack should have a value. 
+ :vartype parsed_stack: list[~exporter.models.StackFrame] + """ + + id: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """In case exception is nested (outer exception contains inner one), the id and + outerId properties are used to represent the nesting.""" + outer_id: Optional[int] = rest_field(name="outerId", visibility=["read", "create", "update", "delete", "query"]) + """The value of outerId is a reference to an element in ExceptionDetails that + represents the outer exception.""" + type_name: Optional[str] = rest_field(name="typeName", visibility=["read", "create", "update", "delete", "query"]) + """Exception type name.""" + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Exception message. Required.""" + has_full_stack: Optional[bool] = rest_field( + name="hasFullStack", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if full exception stack is provided in the exception. The stack may + be trimmed, such as in the case of a StackOverflow exception.""" + stack: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Text describing the stack. Either stack or parsedStack should have a value.""" + parsed_stack: Optional[list["_models.StackFrame"]] = rest_field( + name="parsedStack", visibility=["read", "create", "update", "delete", "query"] + ) + """List of stack frames. Either stack or parsedStack should have a value.""" + + @overload + def __init__( + self, + *, + message: str, + id: Optional[int] = None, # pylint: disable=redefined-builtin + outer_id: Optional[int] = None, + type_name: Optional[str] = None, + has_full_stack: Optional[bool] = None, + stack: Optional[str] = None, + parsed_stack: Optional[list["_models.StackFrame"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TelemetryItem(_Model): + """System variables for a telemetry item. + + :ivar version: Envelope version. For internal use only. By assigning this the default, it will + not be serialized within the payload unless changed to a value other than #1. + :vartype version: int + :ivar name: Type name of telemetry data item. Required. + :vartype name: str + :ivar time: Event date time when telemetry item was created. This is the wall clock time on + the client when the event was generated. There is no guarantee that the + client's time is accurate. This field must be formatted in UTC ISO 8601 format, + with a trailing 'Z' character, as described publicly on + `https://en.wikipedia.org/wiki/ISO_8601#UTC `_. + Note: the number of decimal seconds + digits provided are variable (and unspecified). Consumers should handle this, + i.e. managed code consumers should not use format 'O' for parsing as it + specifies a fixed length. Example: 2009-06-15T13:45:30.0000000Z. Required. + :vartype time: ~datetime.datetime + :ivar sample_rate: Sampling rate used in application. This telemetry item represents 100 / + sampleRate actual telemetry items. + :vartype sample_rate: float + :ivar sequence: Sequence field used to track absolute order of uploaded events. + :vartype sequence: str + :ivar instrumentation_key: The instrumentation key of the Application Insights resource. + :vartype instrumentation_key: str + :ivar tags: Key/value collection of context properties. See ContextTagKeys for information + on available properties. + :vartype tags: dict[str, str] + :ivar data: Telemetry data item. + :vartype data: ~exporter.models.MonitorBase + """ + + version: Optional[int] = rest_field(name="ver", visibility=["read", "create", "update", "delete", "query"]) + """Envelope version. For internal use only. 
By assigning this the default, it will + not be serialized within the payload unless changed to a value other than #1.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Type name of telemetry data item. Required.""" + time: datetime.datetime = rest_field(visibility=["read", "create", "update", "delete", "query"], format="rfc3339") + """Event date time when telemetry item was created. This is the wall clock time on + the client when the event was generated. There is no guarantee that the + client's time is accurate. This field must be formatted in UTC ISO 8601 format, + with a trailing 'Z' character, as described publicly on + `https://en.wikipedia.org/wiki/ISO_8601#UTC `_. + Note: the number of decimal seconds + digits provided are variable (and unspecified). Consumers should handle this, + i.e. managed code consumers should not use format 'O' for parsing as it + specifies a fixed length. Example: 2009-06-15T13:45:30.0000000Z. Required.""" + sample_rate: Optional[float] = rest_field( + name="sampleRate", visibility=["read", "create", "update", "delete", "query"] + ) + """Sampling rate used in application. This telemetry item represents 100 / + sampleRate actual telemetry items.""" + sequence: Optional[str] = rest_field(name="seq", visibility=["read", "create", "update", "delete", "query"]) + """Sequence field used to track absolute order of uploaded events.""" + instrumentation_key: Optional[str] = rest_field( + name="iKey", visibility=["read", "create", "update", "delete", "query"] + ) + """The instrumentation key of the Application Insights resource.""" + tags: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Key/value collection of context properties. 
See ContextTagKeys for information + on available properties.""" + data: Optional["_models.MonitorBase"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Telemetry data item.""" + + @overload + def __init__( + self, + *, + name: str, + time: datetime.datetime, + version: Optional[int] = None, + sample_rate: Optional[float] = None, + sequence: Optional[str] = None, + instrumentation_key: Optional[str] = None, + tags: Optional[dict[str, str]] = None, + data: Optional["_models.MonitorBase"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TrackResponse(_Model): + """Response containing the status of each telemetry item. + + :ivar items_received: The number of items received. + :vartype items_received: int + :ivar items_accepted: The number of items accepted. + :vartype items_accepted: int + :ivar errors: An array of error detail objects. + :vartype errors: list[~exporter.models.TelemetryErrorDetails] + """ + + items_received: Optional[int] = rest_field( + name="itemsReceived", visibility=["read", "create", "update", "delete", "query"] + ) + """The number of items received.""" + items_accepted: Optional[int] = rest_field( + name="itemsAccepted", visibility=["read", "create", "update", "delete", "query"] + ) + """The number of items accepted.""" + errors: Optional[list["_models.TelemetryErrorDetails"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """An array of error detail objects.""" + + @overload + def __init__( + self, + *, + items_received: Optional[int] = None, + items_accepted: Optional[int] = None, + errors: Optional[list["_models.TelemetryErrorDetails"]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_patch.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/models/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/py.typed b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/py.typed similarity index 100% rename from sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/py.typed rename to sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/exporter/py.typed diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/__init__.py deleted file mode 100644 index 2f29230ef836..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AvailabilityData - from ._models_py3 import MessageData - from ._models_py3 import MetricDataPoint - from ._models_py3 import MetricsData - from ._models_py3 import MonitorBase - from ._models_py3 import MonitorDomain - from ._models_py3 import PageViewData - from ._models_py3 import PageViewPerfData - from ._models_py3 import RemoteDependencyData - from ._models_py3 import RequestData - from ._models_py3 import StackFrame - from ._models_py3 import TelemetryErrorDetails - from ._models_py3 import TelemetryEventData - from ._models_py3 import TelemetryExceptionData - from ._models_py3 import TelemetryExceptionDetails - from ._models_py3 import TelemetryItem - from ._models_py3 import TrackResponse -except (SyntaxError, ImportError): - from ._models import AvailabilityData # type: ignore - from ._models import MessageData # type: ignore - from ._models import MetricDataPoint # type: ignore - from ._models import MetricsData # type: ignore - from ._models import MonitorBase # type: ignore - from ._models import MonitorDomain # type: ignore - from ._models import PageViewData # type: ignore - from ._models import PageViewPerfData # type: ignore - from ._models import RemoteDependencyData # type: ignore - from ._models import RequestData # type: ignore - from ._models import StackFrame # type: ignore - from ._models import TelemetryErrorDetails # type: ignore - from ._models import TelemetryEventData # type: ignore - from ._models import TelemetryExceptionData # type: ignore - from ._models import TelemetryExceptionDetails # type: ignore - from ._models import TelemetryItem # type: ignore - from ._models import TrackResponse # type: ignore - -from ._azure_monitor_client_enums import ( - ContextTagKeys, - DataPointType, - SeverityLevel, -) - -__all__ = [ - "AvailabilityData", - "MessageData", - "MetricDataPoint", - "MetricsData", - "MonitorBase", - 
"MonitorDomain", - "PageViewData", - "PageViewPerfData", - "RemoteDependencyData", - "RequestData", - "StackFrame", - "TelemetryErrorDetails", - "TelemetryEventData", - "TelemetryExceptionData", - "TelemetryExceptionDetails", - "TelemetryItem", - "TrackResponse", - "ContextTagKeys", - "DataPointType", - "SeverityLevel", -] diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models.py deleted file mode 100644 index ceaf61192c50..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models.py +++ /dev/null @@ -1,1167 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class MonitorDomain(msrest.serialization.Model): - """The abstract common base of all domains. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. 
- :vartype version: int - """ - - _validation = { - "version": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - """ - super(MonitorDomain, self).__init__(**kwargs) - self.additional_properties = kwargs.get("additional_properties", None) - self.version = kwargs.get("version", 2) - - -class AvailabilityData(MonitorDomain): - """Instances of AvailabilityData represent the result of executing an availability test. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a test run. Use it to correlate steps of test run and - telemetry generated by the service. - :vartype id: str - :ivar name: Required. Name of the test that these availability results represent. - :vartype name: str - :ivar duration: Required. Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. - :vartype duration: str - :ivar success: Required. Success flag. - :vartype success: bool - :ivar run_location: Name of the location where the test was run from. - :vartype run_location: str - :ivar message: Diagnostic message for the result. - :vartype message: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - "success": {"required": True}, - "run_location": {"max_length": 1024, "min_length": 0}, - "message": {"max_length": 8192, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "run_location": {"key": "runLocation", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a test run. Use it to correlate steps of test run and - telemetry generated by the service. - :paramtype id: str - :keyword name: Required. Name of the test that these availability results represent. - :paramtype name: str - :keyword duration: Required. Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 - days. - :paramtype duration: str - :keyword success: Required. Success flag. - :paramtype success: bool - :keyword run_location: Name of the location where the test was run from. - :paramtype run_location: str - :keyword message: Diagnostic message for the result. - :paramtype message: str - :keyword properties: Collection of custom properties. 
- :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(AvailabilityData, self).__init__(**kwargs) - self.id = kwargs["id"] - self.name = kwargs["name"] - self.duration = kwargs["duration"] - self.success = kwargs["success"] - self.run_location = kwargs.get("run_location", None) - self.message = kwargs.get("message", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class MessageData(MonitorDomain): - """Instances of Message represent printf-like trace statements that are text-searched. Log4Net, NLog and other text-based log file entries are translated into instances of this type. The message does not have measurements. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar message: Required. Trace message. - :vartype message: str - :ivar severity_level: Trace severity level. Possible values include: "Verbose", "Information", - "Warning", "Error", "Critical". - :vartype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "message": {"required": True, "max_length": 32768, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "message": {"key": "message", "type": "str"}, - "severity_level": {"key": "severityLevel", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword message: Required. Trace message. - :paramtype message: str - :keyword severity_level: Trace severity level. Possible values include: "Verbose", - "Information", "Warning", "Error", "Critical". - :paramtype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(MessageData, self).__init__(**kwargs) - self.message = kwargs["message"] - self.severity_level = kwargs.get("severity_level", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class MetricDataPoint(msrest.serialization.Model): - """Metric data single measurement. - - All required parameters must be populated in order to send to Azure. - - :ivar namespace: Namespace of the metric. - :vartype namespace: str - :ivar name: Required. Name of the metric. - :vartype name: str - :ivar data_point_type: Metric type. Single measurement or the aggregated value. Possible values - include: "Measurement", "Aggregation". 
- :vartype data_point_type: str or ~azure_monitor_client.models.DataPointType - :ivar value: Required. Single value for measurement. Sum of individual measurements for the - aggregation. - :vartype value: float - :ivar count: Metric weight of the aggregated metric. Should not be set for a measurement. - :vartype count: int - :ivar min: Minimum value of the aggregated metric. Should not be set for a measurement. - :vartype min: float - :ivar max: Maximum value of the aggregated metric. Should not be set for a measurement. - :vartype max: float - :ivar std_dev: Standard deviation of the aggregated metric. Should not be set for a - measurement. - :vartype std_dev: float - """ - - _validation = { - "namespace": {"max_length": 256, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "value": {"required": True}, - } - - _attribute_map = { - "namespace": {"key": "ns", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "data_point_type": {"key": "kind", "type": "str"}, - "value": {"key": "value", "type": "float"}, - "count": {"key": "count", "type": "int"}, - "min": {"key": "min", "type": "float"}, - "max": {"key": "max", "type": "float"}, - "std_dev": {"key": "stdDev", "type": "float"}, - } - - def __init__(self, **kwargs): - """ - :keyword namespace: Namespace of the metric. - :paramtype namespace: str - :keyword name: Required. Name of the metric. - :paramtype name: str - :keyword data_point_type: Metric type. Single measurement or the aggregated value. Possible - values include: "Measurement", "Aggregation". - :paramtype data_point_type: str or ~azure_monitor_client.models.DataPointType - :keyword value: Required. Single value for measurement. Sum of individual measurements for the - aggregation. - :paramtype value: float - :keyword count: Metric weight of the aggregated metric. Should not be set for a measurement. - :paramtype count: int - :keyword min: Minimum value of the aggregated metric. 
Should not be set for a measurement. - :paramtype min: float - :keyword max: Maximum value of the aggregated metric. Should not be set for a measurement. - :paramtype max: float - :keyword std_dev: Standard deviation of the aggregated metric. Should not be set for a - measurement. - :paramtype std_dev: float - """ - super(MetricDataPoint, self).__init__(**kwargs) - self.namespace = kwargs.get("namespace", None) - self.name = kwargs["name"] - self.data_point_type = kwargs.get("data_point_type", None) - self.value = kwargs["value"] - self.count = kwargs.get("count", None) - self.min = kwargs.get("min", None) - self.max = kwargs.get("max", None) - self.std_dev = kwargs.get("std_dev", None) - - -class MetricsData(MonitorDomain): - """An instance of the Metric item is a list of measurements (single data points) and/or aggregations. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar metrics: Required. List of metrics. Only one metric in the list is currently supported by - Application Insights storage. If multiple data points were sent only the first one will be - used. - :vartype metrics: list[~azure_monitor_client.models.MetricDataPoint] - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - """ - - _validation = { - "version": {"required": True}, - "metrics": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "metrics": {"key": "metrics", "type": "[MetricDataPoint]"}, - "properties": {"key": "properties", "type": "{str}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword metrics: Required. List of metrics. Only one metric in the list is currently supported - by Application Insights storage. If multiple data points were sent only the first one will be - used. - :paramtype metrics: list[~azure_monitor_client.models.MetricDataPoint] - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - """ - super(MetricsData, self).__init__(**kwargs) - self.metrics = kwargs["metrics"] - self.properties = kwargs.get("properties", None) - - -class MonitorBase(msrest.serialization.Model): - """Data struct to contain only C section with custom fields. - - :ivar base_type: Name of item (B section) if any. If telemetry data is derived straight from - this, this should be null. - :vartype base_type: str - :ivar base_data: The data payload for the telemetry request. - :vartype base_data: ~azure_monitor_client.models.MonitorDomain - """ - - _attribute_map = { - "base_type": {"key": "baseType", "type": "str"}, - "base_data": {"key": "baseData", "type": "MonitorDomain"}, - } - - def __init__(self, **kwargs): - """ - :keyword base_type: Name of item (B section) if any. If telemetry data is derived straight from - this, this should be null. - :paramtype base_type: str - :keyword base_data: The data payload for the telemetry request. - :paramtype base_data: ~azure_monitor_client.models.MonitorDomain - """ - super(MonitorBase, self).__init__(**kwargs) - self.base_type = kwargs.get("base_type", None) - self.base_data = kwargs.get("base_data", None) - - -class PageViewData(MonitorDomain): - """An instance of PageView represents a generic action on a page like a button click. It is also the base type for PageView. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a page view instance. Used for correlation between page view - and other telemetry items. - :vartype id: str - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. - :vartype name: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), - this is the duration. For a page view with performance information (PageViewPerfData), this is - the page load time. Must be less than 1000 days. - :vartype duration: str - :ivar referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave - blank. - :vartype referred_uri: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - "referred_uri": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "referred_uri": {"key": "referredUri", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a page view instance. Used for correlation between page - view and other telemetry items. - :paramtype id: str - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword url: Request URL with all query string parameters. - :paramtype url: str - :keyword duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view - (PageViewData), this is the duration. For a page view with performance information - (PageViewPerfData), this is the page load time. Must be less than 1000 days. - :paramtype duration: str - :keyword referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave - blank. - :paramtype referred_uri: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(PageViewData, self).__init__(**kwargs) - self.id = kwargs["id"] - self.name = kwargs["name"] - self.url = kwargs.get("url", None) - self.duration = kwargs.get("duration", None) - self.referred_uri = kwargs.get("referred_uri", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class PageViewPerfData(MonitorDomain): - """An instance of PageViewPerf represents: a page view with no performance data, a page view with performance data, or just the performance data of an earlier page request. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. 
Identifier of a page view instance. Used for correlation between page view - and other telemetry items. - :vartype id: str - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. - :vartype name: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), - this is the duration. For a page view with performance information (PageViewPerfData), this is - the page load time. Must be less than 1000 days. - :vartype duration: str - :ivar perf_total: Performance total in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff. - :vartype perf_total: str - :ivar network_connect: Network connection time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype network_connect: str - :ivar sent_request: Sent request time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype sent_request: str - :ivar received_response: Received response time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype received_response: str - :ivar dom_processing: DOM processing time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype dom_processing: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "perf_total": {"key": "perfTotal", "type": "str"}, - "network_connect": {"key": "networkConnect", "type": "str"}, - "sent_request": {"key": "sentRequest", "type": "str"}, - "received_response": {"key": "receivedResponse", "type": "str"}, - "dom_processing": {"key": "domProcessing", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a page view instance. Used for correlation between page - view and other telemetry items. - :paramtype id: str - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword url: Request URL with all query string parameters. - :paramtype url: str - :keyword duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view - (PageViewData), this is the duration. For a page view with performance information - (PageViewPerfData), this is the page load time. Must be less than 1000 days. 
- :paramtype duration: str - :keyword perf_total: Performance total in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype perf_total: str - :keyword network_connect: Network connection time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype network_connect: str - :keyword sent_request: Sent request time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype sent_request: str - :keyword received_response: Received response time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype received_response: str - :keyword dom_processing: DOM processing time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype dom_processing: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(PageViewPerfData, self).__init__(**kwargs) - self.id = kwargs["id"] - self.name = kwargs["name"] - self.url = kwargs.get("url", None) - self.duration = kwargs.get("duration", None) - self.perf_total = kwargs.get("perf_total", None) - self.network_connect = kwargs.get("network_connect", None) - self.sent_request = kwargs.get("sent_request", None) - self.received_response = kwargs.get("received_response", None) - self.dom_processing = kwargs.get("dom_processing", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class RemoteDependencyData(MonitorDomain): - """An instance of Remote Dependency represents an interaction of the monitored component with a remote component/service like SQL or an HTTP endpoint. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. 
- :vartype version: int - :ivar id: Identifier of a dependency call instance. Used for correlation with the request - telemetry item corresponding to this dependency call. - :vartype id: str - :ivar name: Required. Name of the command initiated with this dependency call. Low cardinality - value. Examples are stored procedure name and URL path template. - :vartype name: str - :ivar result_code: Result code of a dependency call. Examples are SQL error code and HTTP - status code. - :vartype result_code: str - :ivar data: Command initiated by this dependency call. Examples are SQL statement and HTTP URL - with all query parameters. - :vartype data: str - :ivar type: Dependency type name. Very low cardinality value for logical grouping of - dependencies and interpretation of other fields like commandName and resultCode. Examples are - SQL, Azure table, and HTTP. - :vartype type: str - :ivar target: Target site of a dependency call. Examples are server name, host address. - :vartype target: str - :ivar duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :vartype duration: str - :ivar success: Indication of successful or unsuccessful call. - :vartype success: bool - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "result_code": {"max_length": 1024, "min_length": 0}, - "data": {"max_length": 8192, "min_length": 0}, - "type": {"max_length": 1024, "min_length": 0}, - "target": {"max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "result_code": {"key": "resultCode", "type": "str"}, - "data": {"key": "data", "type": "str"}, - "type": {"key": "type", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Identifier of a dependency call instance. Used for correlation with the request - telemetry item corresponding to this dependency call. - :paramtype id: str - :keyword name: Required. Name of the command initiated with this dependency call. Low - cardinality value. Examples are stored procedure name and URL path template. - :paramtype name: str - :keyword result_code: Result code of a dependency call. Examples are SQL error code and HTTP - status code. - :paramtype result_code: str - :keyword data: Command initiated by this dependency call. Examples are SQL statement and HTTP - URL with all query parameters. 
- :paramtype data: str - :keyword type: Dependency type name. Very low cardinality value for logical grouping of - dependencies and interpretation of other fields like commandName and resultCode. Examples are - SQL, Azure table, and HTTP. - :paramtype type: str - :keyword target: Target site of a dependency call. Examples are server name, host address. - :paramtype target: str - :keyword duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :paramtype duration: str - :keyword success: Indication of successful or unsuccessful call. - :paramtype success: bool - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(RemoteDependencyData, self).__init__(**kwargs) - self.id = kwargs.get("id", None) - self.name = kwargs["name"] - self.result_code = kwargs.get("result_code", None) - self.data = kwargs.get("data", None) - self.type = kwargs.get("type", None) - self.target = kwargs.get("target", None) - self.duration = kwargs["duration"] - self.success = kwargs.get("success", True) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class RequestData(MonitorDomain): - """An instance of Request represents completion of an external request to the application to do work and contains a summary of that request execution and the results. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a request call instance. Used for correlation between request - and other telemetry items. - :vartype id: str - :ivar name: Name of the request. 
Represents code path taken to process request. Low cardinality - value to allow better grouping of requests. For HTTP requests it represents the HTTP method and - URL path template like 'GET /values/{id}'. - :vartype name: str - :ivar duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :vartype duration: str - :ivar success: Required. Indication of successful or unsuccessful call. - :vartype success: bool - :ivar response_code: Required. Result of a request execution. HTTP status code for HTTP - requests. - :vartype response_code: str - :ivar source: Source of the request. Examples are the instrumentation key of the caller or the - ip address of the caller. - :vartype source: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - "success": {"required": True}, - "response_code": {"required": True, "max_length": 1024, "min_length": 0}, - "source": {"max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "response_code": {"key": "responseCode", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - 
- def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a request call instance. Used for correlation between - request and other telemetry items. - :paramtype id: str - :keyword name: Name of the request. Represents code path taken to process request. Low - cardinality value to allow better grouping of requests. For HTTP requests it represents the - HTTP method and URL path template like 'GET /values/{id}'. - :paramtype name: str - :keyword duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :paramtype duration: str - :keyword success: Required. Indication of successful or unsuccessful call. - :paramtype success: bool - :keyword response_code: Required. Result of a request execution. HTTP status code for HTTP - requests. - :paramtype response_code: str - :keyword source: Source of the request. Examples are the instrumentation key of the caller or - the ip address of the caller. - :paramtype source: str - :keyword url: Request URL with all query string parameters. - :paramtype url: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. 
- :paramtype measurements: dict[str, float] - """ - super(RequestData, self).__init__(**kwargs) - self.id = kwargs["id"] - self.name = kwargs.get("name", None) - self.duration = kwargs["duration"] - self.success = kwargs.get("success", True) - self.response_code = kwargs["response_code"] - self.source = kwargs.get("source", None) - self.url = kwargs.get("url", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class StackFrame(msrest.serialization.Model): - """Stack frame information. - - All required parameters must be populated in order to send to Azure. - - :ivar level: Required. - :vartype level: int - :ivar method: Required. Method name. - :vartype method: str - :ivar assembly: Name of the assembly (dll, jar, etc.) containing this function. - :vartype assembly: str - :ivar file_name: File name or URL of the method implementation. - :vartype file_name: str - :ivar line: Line number of the code implementation. - :vartype line: int - """ - - _validation = { - "level": {"required": True}, - "method": {"required": True, "max_length": 1024, "min_length": 0}, - "assembly": {"max_length": 1024, "min_length": 0}, - "file_name": {"max_length": 1024, "min_length": 0}, - } - - _attribute_map = { - "level": {"key": "level", "type": "int"}, - "method": {"key": "method", "type": "str"}, - "assembly": {"key": "assembly", "type": "str"}, - "file_name": {"key": "fileName", "type": "str"}, - "line": {"key": "line", "type": "int"}, - } - - def __init__(self, **kwargs): - """ - :keyword level: Required. - :paramtype level: int - :keyword method: Required. Method name. - :paramtype method: str - :keyword assembly: Name of the assembly (dll, jar, etc.) containing this function. - :paramtype assembly: str - :keyword file_name: File name or URL of the method implementation. - :paramtype file_name: str - :keyword line: Line number of the code implementation. 
- :paramtype line: int - """ - super(StackFrame, self).__init__(**kwargs) - self.level = kwargs["level"] - self.method = kwargs["method"] - self.assembly = kwargs.get("assembly", None) - self.file_name = kwargs.get("file_name", None) - self.line = kwargs.get("line", None) - - -class TelemetryErrorDetails(msrest.serialization.Model): - """The error details. - - :ivar index: The index in the original payload of the item. - :vartype index: int - :ivar status_code: The item specific `HTTP Response status code <#Response Status Codes>`_. - :vartype status_code: int - :ivar message: The error message. - :vartype message: str - """ - - _attribute_map = { - "index": {"key": "index", "type": "int"}, - "status_code": {"key": "statusCode", "type": "int"}, - "message": {"key": "message", "type": "str"}, - } - - def __init__(self, **kwargs): - """ - :keyword index: The index in the original payload of the item. - :paramtype index: int - :keyword status_code: The item specific `HTTP Response status code <#Response Status Codes>`_. - :paramtype status_code: int - :keyword message: The error message. - :paramtype message: str - """ - super(TelemetryErrorDetails, self).__init__(**kwargs) - self.index = kwargs.get("index", None) - self.status_code = kwargs.get("status_code", None) - self.message = kwargs.get("message", None) - - -class TelemetryEventData(MonitorDomain): - """Instances of Event represent structured event records that can be grouped and searched by their properties. Event data item also creates a metric of event count by name. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. 
- :vartype name: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "name": {"required": True, "max_length": 512, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "name": {"key": "name", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(TelemetryEventData, self).__init__(**kwargs) - self.name = kwargs["name"] - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class TelemetryExceptionData(MonitorDomain): - """An instance of Exception represents a handled or unhandled exception that occurred during execution of the monitored application. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar exceptions: Required. Exception chain - list of inner exceptions. 
- :vartype exceptions: list[~azure_monitor_client.models.TelemetryExceptionDetails] - :ivar severity_level: Severity level. Mostly used to indicate exception severity level when it - is reported by logging library. Possible values include: "Verbose", "Information", "Warning", - "Error", "Critical". - :vartype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :ivar problem_id: Identifier of where the exception was thrown in code. Used for exceptions - grouping. Typically a combination of exception type and a function from the call stack. - :vartype problem_id: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "exceptions": {"required": True}, - "problem_id": {"max_length": 1024, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "exceptions": {"key": "exceptions", "type": "[TelemetryExceptionDetails]"}, - "severity_level": {"key": "severityLevel", "type": "str"}, - "problem_id": {"key": "problemId", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__(self, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword exceptions: Required. Exception chain - list of inner exceptions. - :paramtype exceptions: list[~azure_monitor_client.models.TelemetryExceptionDetails] - :keyword severity_level: Severity level. Mostly used to indicate exception severity level when - it is reported by logging library. 
Possible values include: "Verbose", "Information", - "Warning", "Error", "Critical". - :paramtype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :keyword problem_id: Identifier of where the exception was thrown in code. Used for exceptions - grouping. Typically a combination of exception type and a function from the call stack. - :paramtype problem_id: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(TelemetryExceptionData, self).__init__(**kwargs) - self.exceptions = kwargs["exceptions"] - self.severity_level = kwargs.get("severity_level", None) - self.problem_id = kwargs.get("problem_id", None) - self.properties = kwargs.get("properties", None) - self.measurements = kwargs.get("measurements", None) - - -class TelemetryExceptionDetails(msrest.serialization.Model): - """Exception details of the exception in a chain. - - All required parameters must be populated in order to send to Azure. - - :ivar id: In case exception is nested (outer exception contains inner one), the id and outerId - properties are used to represent the nesting. - :vartype id: int - :ivar outer_id: The value of outerId is a reference to an element in ExceptionDetails that - represents the outer exception. - :vartype outer_id: int - :ivar type_name: Exception type name. - :vartype type_name: str - :ivar message: Required. Exception message. - :vartype message: str - :ivar has_full_stack: Indicates if full exception stack is provided in the exception. The stack - may be trimmed, such as in the case of a StackOverflow exception. - :vartype has_full_stack: bool - :ivar stack: Text describing the stack. Either stack or parsedStack should have a value. - :vartype stack: str - :ivar parsed_stack: List of stack frames. Either stack or parsedStack should have a value. 
- :vartype parsed_stack: list[~azure_monitor_client.models.StackFrame] - """ - - _validation = { - "type_name": {"max_length": 1024, "min_length": 0}, - "message": {"required": True, "max_length": 32768, "min_length": 0}, - "stack": {"max_length": 32768, "min_length": 0}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "outer_id": {"key": "outerId", "type": "int"}, - "type_name": {"key": "typeName", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "has_full_stack": {"key": "hasFullStack", "type": "bool"}, - "stack": {"key": "stack", "type": "str"}, - "parsed_stack": {"key": "parsedStack", "type": "[StackFrame]"}, - } - - def __init__(self, **kwargs): - """ - :keyword id: In case exception is nested (outer exception contains inner one), the id and - outerId properties are used to represent the nesting. - :paramtype id: int - :keyword outer_id: The value of outerId is a reference to an element in ExceptionDetails that - represents the outer exception. - :paramtype outer_id: int - :keyword type_name: Exception type name. - :paramtype type_name: str - :keyword message: Required. Exception message. - :paramtype message: str - :keyword has_full_stack: Indicates if full exception stack is provided in the exception. The - stack may be trimmed, such as in the case of a StackOverflow exception. - :paramtype has_full_stack: bool - :keyword stack: Text describing the stack. Either stack or parsedStack should have a value. - :paramtype stack: str - :keyword parsed_stack: List of stack frames. Either stack or parsedStack should have a value. 
- :paramtype parsed_stack: list[~azure_monitor_client.models.StackFrame] - """ - super(TelemetryExceptionDetails, self).__init__(**kwargs) - self.id = kwargs.get("id", None) - self.outer_id = kwargs.get("outer_id", None) - self.type_name = kwargs.get("type_name", None) - self.message = kwargs["message"] - self.has_full_stack = kwargs.get("has_full_stack", True) - self.stack = kwargs.get("stack", None) - self.parsed_stack = kwargs.get("parsed_stack", None) - - -class TelemetryItem(msrest.serialization.Model): - """System variables for a telemetry item. - - All required parameters must be populated in order to send to Azure. - - :ivar version: Envelope version. For internal use only. By assigning this the default, it will - not be serialized within the payload unless changed to a value other than #1. - :vartype version: int - :ivar name: Required. Type name of telemetry data item. - :vartype name: str - :ivar time: Required. Event date time when telemetry item was created. This is the wall clock - time on the client when the event was generated. There is no guarantee that the client's time - is accurate. This field must be formatted in UTC ISO 8601 format, with a trailing 'Z' - character, as described publicly on https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the - number of decimal seconds digits provided are variable (and unspecified). Consumers should - handle this, i.e. managed code consumers should not use format 'O' for parsing as it specifies - a fixed length. Example: 2009-06-15T13:45:30.0000000Z. - :vartype time: ~datetime.datetime - :ivar sample_rate: Sampling rate used in application. This telemetry item represents 100 / - sampleRate actual telemetry items. - :vartype sample_rate: float - :ivar sequence: Sequence field used to track absolute order of uploaded events. - :vartype sequence: str - :ivar instrumentation_key: The instrumentation key of the Application Insights resource. - :vartype instrumentation_key: str - :ivar tags: A set of tags. 
Key/value collection of context properties. See ContextTagKeys for - information on available properties. - :vartype tags: dict[str, str] - :ivar data: Telemetry data item. - :vartype data: ~azure_monitor_client.models.MonitorBase - """ - - _validation = { - "name": {"required": True}, - "time": {"required": True}, - "sequence": {"max_length": 64, "min_length": 0}, - } - - _attribute_map = { - "version": {"key": "ver", "type": "int"}, - "name": {"key": "name", "type": "str"}, - "time": {"key": "time", "type": "iso-8601"}, - "sample_rate": {"key": "sampleRate", "type": "float"}, - "sequence": {"key": "seq", "type": "str"}, - "instrumentation_key": {"key": "iKey", "type": "str"}, - "tags": {"key": "tags", "type": "{str}"}, - "data": {"key": "data", "type": "MonitorBase"}, - } - - def __init__(self, **kwargs): - """ - :keyword version: Envelope version. For internal use only. By assigning this the default, it - will not be serialized within the payload unless changed to a value other than #1. - :paramtype version: int - :keyword name: Required. Type name of telemetry data item. - :paramtype name: str - :keyword time: Required. Event date time when telemetry item was created. This is the wall - clock time on the client when the event was generated. There is no guarantee that the client's - time is accurate. This field must be formatted in UTC ISO 8601 format, with a trailing 'Z' - character, as described publicly on https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the - number of decimal seconds digits provided are variable (and unspecified). Consumers should - handle this, i.e. managed code consumers should not use format 'O' for parsing as it specifies - a fixed length. Example: 2009-06-15T13:45:30.0000000Z. - :paramtype time: ~datetime.datetime - :keyword sample_rate: Sampling rate used in application. This telemetry item represents 100 / - sampleRate actual telemetry items. 
- :paramtype sample_rate: float - :keyword sequence: Sequence field used to track absolute order of uploaded events. - :paramtype sequence: str - :keyword instrumentation_key: The instrumentation key of the Application Insights resource. - :paramtype instrumentation_key: str - :keyword tags: A set of tags. Key/value collection of context properties. See ContextTagKeys - for information on available properties. - :paramtype tags: dict[str, str] - :keyword data: Telemetry data item. - :paramtype data: ~azure_monitor_client.models.MonitorBase - """ - super(TelemetryItem, self).__init__(**kwargs) - self.version = kwargs.get("version", 1) - self.name = kwargs["name"] - self.time = kwargs["time"] - self.sample_rate = kwargs.get("sample_rate", 100) - self.sequence = kwargs.get("sequence", None) - self.instrumentation_key = kwargs.get("instrumentation_key", None) - self.tags = kwargs.get("tags", None) - self.data = kwargs.get("data", None) - - -class TrackResponse(msrest.serialization.Model): - """Response containing the status of each telemetry item. - - :ivar items_received: The number of items received. - :vartype items_received: int - :ivar items_accepted: The number of items accepted. - :vartype items_accepted: int - :ivar errors: An array of error detail objects. - :vartype errors: list[~azure_monitor_client.models.TelemetryErrorDetails] - """ - - _attribute_map = { - "items_received": {"key": "itemsReceived", "type": "int"}, - "items_accepted": {"key": "itemsAccepted", "type": "int"}, - "errors": {"key": "errors", "type": "[TelemetryErrorDetails]"}, - } - - def __init__(self, **kwargs): - """ - :keyword items_received: The number of items received. - :paramtype items_received: int - :keyword items_accepted: The number of items accepted. - :paramtype items_accepted: int - :keyword errors: An array of error detail objects. 
- :paramtype errors: list[~azure_monitor_client.models.TelemetryErrorDetails] - """ - super(TrackResponse, self).__init__(**kwargs) - self.items_received = kwargs.get("items_received", None) - self.items_accepted = kwargs.get("items_accepted", None) - self.errors = kwargs.get("errors", None) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models_py3.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models_py3.py deleted file mode 100644 index fefe33af86cb..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/models/_models_py3.py +++ /dev/null @@ -1,1342 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_monitor_client_enums import * - - -class MonitorDomain(msrest.serialization.Model): - """The abstract common base of all domains. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. 
- :vartype version: int - """ - - _validation = { - "version": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - } - - def __init__(self, *, version: int = 2, additional_properties: Optional[Dict[str, Any]] = None, **kwargs): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - """ - super(MonitorDomain, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.version = version - - -class AvailabilityData(MonitorDomain): - """Instances of AvailabilityData represent the result of executing an availability test. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a test run. Use it to correlate steps of test run and - telemetry generated by the service. - :vartype id: str - :ivar name: Required. Name of the test that these availability results represent. - :vartype name: str - :ivar duration: Required. Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. - :vartype duration: str - :ivar success: Required. Success flag. - :vartype success: bool - :ivar run_location: Name of the location where the test was run from. - :vartype run_location: str - :ivar message: Diagnostic message for the result. - :vartype message: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - "success": {"required": True}, - "run_location": {"max_length": 1024, "min_length": 0}, - "message": {"max_length": 8192, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "run_location": {"key": "runLocation", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - id: str, - name: str, - duration: str, - success: bool, - additional_properties: Optional[Dict[str, Any]] = None, - run_location: Optional[str] = None, - message: Optional[str] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a test run. Use it to correlate steps of test run and - telemetry generated by the service. - :paramtype id: str - :keyword name: Required. Name of the test that these availability results represent. - :paramtype name: str - :keyword duration: Required. Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 - days. - :paramtype duration: str - :keyword success: Required. Success flag. 
- :paramtype success: bool - :keyword run_location: Name of the location where the test was run from. - :paramtype run_location: str - :keyword message: Diagnostic message for the result. - :paramtype message: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(AvailabilityData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.id = id - self.name = name - self.duration = duration - self.success = success - self.run_location = run_location - self.message = message - self.properties = properties - self.measurements = measurements - - -class MessageData(MonitorDomain): - """Instances of Message represent printf-like trace statements that are text-searched. Log4Net, NLog and other text-based log file entries are translated into instances of this type. The message does not have measurements. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar message: Required. Trace message. - :vartype message: str - :ivar severity_level: Trace severity level. Possible values include: "Verbose", "Information", - "Warning", "Error", "Critical". - :vartype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "message": {"required": True, "max_length": 32768, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "message": {"key": "message", "type": "str"}, - "severity_level": {"key": "severityLevel", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - message: str, - additional_properties: Optional[Dict[str, Any]] = None, - severity_level: Optional[Union[str, "SeverityLevel"]] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword message: Required. Trace message. - :paramtype message: str - :keyword severity_level: Trace severity level. Possible values include: "Verbose", - "Information", "Warning", "Error", "Critical". - :paramtype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(MessageData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.message = message - self.severity_level = severity_level - self.properties = properties - self.measurements = measurements - - -class MetricDataPoint(msrest.serialization.Model): - """Metric data single measurement. - - All required parameters must be populated in order to send to Azure. 
- - :ivar namespace: Namespace of the metric. - :vartype namespace: str - :ivar name: Required. Name of the metric. - :vartype name: str - :ivar data_point_type: Metric type. Single measurement or the aggregated value. Possible values - include: "Measurement", "Aggregation". - :vartype data_point_type: str or ~azure_monitor_client.models.DataPointType - :ivar value: Required. Single value for measurement. Sum of individual measurements for the - aggregation. - :vartype value: float - :ivar count: Metric weight of the aggregated metric. Should not be set for a measurement. - :vartype count: int - :ivar min: Minimum value of the aggregated metric. Should not be set for a measurement. - :vartype min: float - :ivar max: Maximum value of the aggregated metric. Should not be set for a measurement. - :vartype max: float - :ivar std_dev: Standard deviation of the aggregated metric. Should not be set for a - measurement. - :vartype std_dev: float - """ - - _validation = { - "namespace": {"max_length": 256, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "value": {"required": True}, - } - - _attribute_map = { - "namespace": {"key": "ns", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "data_point_type": {"key": "kind", "type": "str"}, - "value": {"key": "value", "type": "float"}, - "count": {"key": "count", "type": "int"}, - "min": {"key": "min", "type": "float"}, - "max": {"key": "max", "type": "float"}, - "std_dev": {"key": "stdDev", "type": "float"}, - } - - def __init__( - self, - *, - name: str, - value: float, - namespace: Optional[str] = None, - data_point_type: Optional[Union[str, "DataPointType"]] = None, - count: Optional[int] = None, - min: Optional[float] = None, - max: Optional[float] = None, - std_dev: Optional[float] = None, - **kwargs - ): - """ - :keyword namespace: Namespace of the metric. - :paramtype namespace: str - :keyword name: Required. Name of the metric. 
- :paramtype name: str - :keyword data_point_type: Metric type. Single measurement or the aggregated value. Possible - values include: "Measurement", "Aggregation". - :paramtype data_point_type: str or ~azure_monitor_client.models.DataPointType - :keyword value: Required. Single value for measurement. Sum of individual measurements for the - aggregation. - :paramtype value: float - :keyword count: Metric weight of the aggregated metric. Should not be set for a measurement. - :paramtype count: int - :keyword min: Minimum value of the aggregated metric. Should not be set for a measurement. - :paramtype min: float - :keyword max: Maximum value of the aggregated metric. Should not be set for a measurement. - :paramtype max: float - :keyword std_dev: Standard deviation of the aggregated metric. Should not be set for a - measurement. - :paramtype std_dev: float - """ - super(MetricDataPoint, self).__init__(**kwargs) - self.namespace = namespace - self.name = name - self.data_point_type = data_point_type - self.value = value - self.count = count - self.min = min - self.max = max - self.std_dev = std_dev - - -class MetricsData(MonitorDomain): - """An instance of the Metric item is a list of measurements (single data points) and/or aggregations. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar metrics: Required. List of metrics. Only one metric in the list is currently supported by - Application Insights storage. If multiple data points were sent only the first one will be - used. - :vartype metrics: list[~azure_monitor_client.models.MetricDataPoint] - :ivar properties: Collection of custom properties. 
- :vartype properties: dict[str, str] - """ - - _validation = { - "version": {"required": True}, - "metrics": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "metrics": {"key": "metrics", "type": "[MetricDataPoint]"}, - "properties": {"key": "properties", "type": "{str}"}, - } - - def __init__( - self, - *, - version: int = 2, - metrics: List["MetricDataPoint"], - additional_properties: Optional[Dict[str, Any]] = None, - properties: Optional[Dict[str, str]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword metrics: Required. List of metrics. Only one metric in the list is currently supported - by Application Insights storage. If multiple data points were sent only the first one will be - used. - :paramtype metrics: list[~azure_monitor_client.models.MetricDataPoint] - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - """ - super(MetricsData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.metrics = metrics - self.properties = properties - - -class MonitorBase(msrest.serialization.Model): - """Data struct to contain only C section with custom fields. - - :ivar base_type: Name of item (B section) if any. If telemetry data is derived straight from - this, this should be null. - :vartype base_type: str - :ivar base_data: The data payload for the telemetry request. 
- :vartype base_data: ~azure_monitor_client.models.MonitorDomain - """ - - _attribute_map = { - "base_type": {"key": "baseType", "type": "str"}, - "base_data": {"key": "baseData", "type": "MonitorDomain"}, - } - - def __init__(self, *, base_type: Optional[str] = None, base_data: Optional["MonitorDomain"] = None, **kwargs): - """ - :keyword base_type: Name of item (B section) if any. If telemetry data is derived straight from - this, this should be null. - :paramtype base_type: str - :keyword base_data: The data payload for the telemetry request. - :paramtype base_data: ~azure_monitor_client.models.MonitorDomain - """ - super(MonitorBase, self).__init__(**kwargs) - self.base_type = base_type - self.base_data = base_data - - -class PageViewData(MonitorDomain): - """An instance of PageView represents a generic action on a page like a button click. It is also the base type for PageView. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a page view instance. Used for correlation between page view - and other telemetry items. - :vartype id: str - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. - :vartype name: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), - this is the duration. For a page view with performance information (PageViewPerfData), this is - the page load time. Must be less than 1000 days. - :vartype duration: str - :ivar referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave - blank. 
- :vartype referred_uri: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - "referred_uri": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "referred_uri": {"key": "referredUri", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - id: str, - name: str, - additional_properties: Optional[Dict[str, Any]] = None, - url: Optional[str] = None, - duration: Optional[str] = None, - referred_uri: Optional[str] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a page view instance. Used for correlation between page - view and other telemetry items. - :paramtype id: str - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword url: Request URL with all query string parameters. 
- :paramtype url: str - :keyword duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view - (PageViewData), this is the duration. For a page view with performance information - (PageViewPerfData), this is the page load time. Must be less than 1000 days. - :paramtype duration: str - :keyword referred_uri: Fully qualified page URI or URL of the referring page; if unknown, leave - blank. - :paramtype referred_uri: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(PageViewData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.id = id - self.name = name - self.url = url - self.duration = duration - self.referred_uri = referred_uri - self.properties = properties - self.measurements = measurements - - -class PageViewPerfData(MonitorDomain): - """An instance of PageViewPerf represents: a page view with no performance data, a page view with performance data, or just the performance data of an earlier page request. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a page view instance. Used for correlation between page view - and other telemetry items. - :vartype id: str - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. - :vartype name: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view (PageViewData), - this is the duration. 
For a page view with performance information (PageViewPerfData), this is - the page load time. Must be less than 1000 days. - :vartype duration: str - :ivar perf_total: Performance total in TimeSpan 'G' (general long) format: d:hh:mm:ss.fffffff. - :vartype perf_total: str - :ivar network_connect: Network connection time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype network_connect: str - :ivar sent_request: Sent request time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype sent_request: str - :ivar received_response: Received response time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype received_response: str - :ivar dom_processing: DOM processing time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :vartype dom_processing: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "perf_total": {"key": "perfTotal", "type": "str"}, - "network_connect": {"key": "networkConnect", "type": "str"}, - "sent_request": {"key": "sentRequest", "type": "str"}, - "received_response": {"key": "receivedResponse", "type": "str"}, - "dom_processing": {"key": "domProcessing", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - 
self, - *, - version: int = 2, - id: str, - name: str, - additional_properties: Optional[Dict[str, Any]] = None, - url: Optional[str] = None, - duration: Optional[str] = None, - perf_total: Optional[str] = None, - network_connect: Optional[str] = None, - sent_request: Optional[str] = None, - received_response: Optional[str] = None, - dom_processing: Optional[str] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a page view instance. Used for correlation between page - view and other telemetry items. - :paramtype id: str - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword url: Request URL with all query string parameters. - :paramtype url: str - :keyword duration: Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view - (PageViewData), this is the duration. For a page view with performance information - (PageViewPerfData), this is the page load time. Must be less than 1000 days. - :paramtype duration: str - :keyword perf_total: Performance total in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype perf_total: str - :keyword network_connect: Network connection time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype network_connect: str - :keyword sent_request: Sent request time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype sent_request: str - :keyword received_response: Received response time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. 
- :paramtype received_response: str - :keyword dom_processing: DOM processing time in TimeSpan 'G' (general long) format: - d:hh:mm:ss.fffffff. - :paramtype dom_processing: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(PageViewPerfData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.id = id - self.name = name - self.url = url - self.duration = duration - self.perf_total = perf_total - self.network_connect = network_connect - self.sent_request = sent_request - self.received_response = received_response - self.dom_processing = dom_processing - self.properties = properties - self.measurements = measurements - - -class RemoteDependencyData(MonitorDomain): - """An instance of Remote Dependency represents an interaction of the monitored component with a remote component/service like SQL or an HTTP endpoint. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Identifier of a dependency call instance. Used for correlation with the request - telemetry item corresponding to this dependency call. - :vartype id: str - :ivar name: Required. Name of the command initiated with this dependency call. Low cardinality - value. Examples are stored procedure name and URL path template. - :vartype name: str - :ivar result_code: Result code of a dependency call. Examples are SQL error code and HTTP - status code. - :vartype result_code: str - :ivar data: Command initiated by this dependency call. Examples are SQL statement and HTTP URL - with all query parameters. 
- :vartype data: str - :ivar type: Dependency type name. Very low cardinality value for logical grouping of - dependencies and interpretation of other fields like commandName and resultCode. Examples are - SQL, Azure table, and HTTP. - :vartype type: str - :ivar target: Target site of a dependency call. Examples are server name, host address. - :vartype target: str - :ivar duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :vartype duration: str - :ivar success: Indication of successful or unsuccessful call. - :vartype success: bool - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"max_length": 512, "min_length": 0}, - "name": {"required": True, "max_length": 1024, "min_length": 0}, - "result_code": {"max_length": 1024, "min_length": 0}, - "data": {"max_length": 8192, "min_length": 0}, - "type": {"max_length": 1024, "min_length": 0}, - "target": {"max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "result_code": {"key": "resultCode", "type": "str"}, - "data": {"key": "data", "type": "str"}, - "type": {"key": "type", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - name: str, - duration: str, - additional_properties: Optional[Dict[str, Any]] = None, - id: Optional[str] = None, - result_code: 
Optional[str] = None, - data: Optional[str] = None, - type: Optional[str] = None, - target: Optional[str] = None, - success: Optional[bool] = True, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Identifier of a dependency call instance. Used for correlation with the request - telemetry item corresponding to this dependency call. - :paramtype id: str - :keyword name: Required. Name of the command initiated with this dependency call. Low - cardinality value. Examples are stored procedure name and URL path template. - :paramtype name: str - :keyword result_code: Result code of a dependency call. Examples are SQL error code and HTTP - status code. - :paramtype result_code: str - :keyword data: Command initiated by this dependency call. Examples are SQL statement and HTTP - URL with all query parameters. - :paramtype data: str - :keyword type: Dependency type name. Very low cardinality value for logical grouping of - dependencies and interpretation of other fields like commandName and resultCode. Examples are - SQL, Azure table, and HTTP. - :paramtype type: str - :keyword target: Target site of a dependency call. Examples are server name, host address. - :paramtype target: str - :keyword duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :paramtype duration: str - :keyword success: Indication of successful or unsuccessful call. - :paramtype success: bool - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. 
- :paramtype measurements: dict[str, float] - """ - super(RemoteDependencyData, self).__init__( - additional_properties=additional_properties, version=version, **kwargs - ) - self.id = id - self.name = name - self.result_code = result_code - self.data = data - self.type = type - self.target = target - self.duration = duration - self.success = success - self.properties = properties - self.measurements = measurements - - -class RequestData(MonitorDomain): - """An instance of Request represents completion of an external request to the application to do work and contains a summary of that request execution and the results. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar id: Required. Identifier of a request call instance. Used for correlation between request - and other telemetry items. - :vartype id: str - :ivar name: Name of the request. Represents code path taken to process request. Low cardinality - value to allow better grouping of requests. For HTTP requests it represents the HTTP method and - URL path template like 'GET /values/{id}'. - :vartype name: str - :ivar duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :vartype duration: str - :ivar success: Required. Indication of successful or unsuccessful call. - :vartype success: bool - :ivar response_code: Required. Result of a request execution. HTTP status code for HTTP - requests. - :vartype response_code: str - :ivar source: Source of the request. Examples are the instrumentation key of the caller or the - ip address of the caller. - :vartype source: str - :ivar url: Request URL with all query string parameters. - :vartype url: str - :ivar properties: Collection of custom properties. 
- :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "id": {"required": True, "max_length": 512, "min_length": 0}, - "name": {"max_length": 1024, "min_length": 0}, - "duration": {"required": True}, - "success": {"required": True}, - "response_code": {"required": True, "max_length": 1024, "min_length": 0}, - "source": {"max_length": 1024, "min_length": 0}, - "url": {"max_length": 2048, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "id": {"key": "id", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "duration": {"key": "duration", "type": "str"}, - "success": {"key": "success", "type": "bool"}, - "response_code": {"key": "responseCode", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "url": {"key": "url", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - id: str, - duration: str, - success: bool = True, - response_code: str, - additional_properties: Optional[Dict[str, Any]] = None, - name: Optional[str] = None, - source: Optional[str] = None, - url: Optional[str] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword id: Required. Identifier of a request call instance. Used for correlation between - request and other telemetry items. - :paramtype id: str - :keyword name: Name of the request. Represents code path taken to process request. 
Low - cardinality value to allow better grouping of requests. For HTTP requests it represents the - HTTP method and URL path template like 'GET /values/{id}'. - :paramtype name: str - :keyword duration: Required. Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than - 1000 days. - :paramtype duration: str - :keyword success: Required. Indication of successful or unsuccessful call. - :paramtype success: bool - :keyword response_code: Required. Result of a request execution. HTTP status code for HTTP - requests. - :paramtype response_code: str - :keyword source: Source of the request. Examples are the instrumentation key of the caller or - the ip address of the caller. - :paramtype source: str - :keyword url: Request URL with all query string parameters. - :paramtype url: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(RequestData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.id = id - self.name = name - self.duration = duration - self.success = success - self.response_code = response_code - self.source = source - self.url = url - self.properties = properties - self.measurements = measurements - - -class StackFrame(msrest.serialization.Model): - """Stack frame information. - - All required parameters must be populated in order to send to Azure. - - :ivar level: Required. - :vartype level: int - :ivar method: Required. Method name. - :vartype method: str - :ivar assembly: Name of the assembly (dll, jar, etc.) containing this function. - :vartype assembly: str - :ivar file_name: File name or URL of the method implementation. - :vartype file_name: str - :ivar line: Line number of the code implementation. 
- :vartype line: int - """ - - _validation = { - "level": {"required": True}, - "method": {"required": True, "max_length": 1024, "min_length": 0}, - "assembly": {"max_length": 1024, "min_length": 0}, - "file_name": {"max_length": 1024, "min_length": 0}, - } - - _attribute_map = { - "level": {"key": "level", "type": "int"}, - "method": {"key": "method", "type": "str"}, - "assembly": {"key": "assembly", "type": "str"}, - "file_name": {"key": "fileName", "type": "str"}, - "line": {"key": "line", "type": "int"}, - } - - def __init__( - self, - *, - level: int, - method: str, - assembly: Optional[str] = None, - file_name: Optional[str] = None, - line: Optional[int] = None, - **kwargs - ): - """ - :keyword level: Required. - :paramtype level: int - :keyword method: Required. Method name. - :paramtype method: str - :keyword assembly: Name of the assembly (dll, jar, etc.) containing this function. - :paramtype assembly: str - :keyword file_name: File name or URL of the method implementation. - :paramtype file_name: str - :keyword line: Line number of the code implementation. - :paramtype line: int - """ - super(StackFrame, self).__init__(**kwargs) - self.level = level - self.method = method - self.assembly = assembly - self.file_name = file_name - self.line = line - - -class TelemetryErrorDetails(msrest.serialization.Model): - """The error details. - - :ivar index: The index in the original payload of the item. - :vartype index: int - :ivar status_code: The item specific `HTTP Response status code <#Response Status Codes>`_. - :vartype status_code: int - :ivar message: The error message. 
- :vartype message: str - """ - - _attribute_map = { - "index": {"key": "index", "type": "int"}, - "status_code": {"key": "statusCode", "type": "int"}, - "message": {"key": "message", "type": "str"}, - } - - def __init__( - self, *, index: Optional[int] = None, status_code: Optional[int] = None, message: Optional[str] = None, **kwargs - ): - """ - :keyword index: The index in the original payload of the item. - :paramtype index: int - :keyword status_code: The item specific `HTTP Response status code <#Response Status Codes>`_. - :paramtype status_code: int - :keyword message: The error message. - :paramtype message: str - """ - super(TelemetryErrorDetails, self).__init__(**kwargs) - self.index = index - self.status_code = status_code - self.message = message - - -class TelemetryEventData(MonitorDomain): - """Instances of Event represent structured event records that can be grouped and searched by their properties. Event data item also creates a metric of event count by name. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar name: Required. Event name. Keep it low cardinality to allow proper grouping and useful - metrics. - :vartype name: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. 
- :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "name": {"required": True, "max_length": 512, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "name": {"key": "name", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - name: str, - additional_properties: Optional[Dict[str, Any]] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. Schema version. - :paramtype version: int - :keyword name: Required. Event name. Keep it low cardinality to allow proper grouping and - useful metrics. - :paramtype name: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(TelemetryEventData, self).__init__(additional_properties=additional_properties, version=version, **kwargs) - self.name = name - self.properties = properties - self.measurements = measurements - - -class TelemetryExceptionData(MonitorDomain): - """An instance of Exception represents a handled or unhandled exception that occurred during execution of the monitored application. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar version: Required. Schema version. - :vartype version: int - :ivar exceptions: Required. 
Exception chain - list of inner exceptions. - :vartype exceptions: list[~azure_monitor_client.models.TelemetryExceptionDetails] - :ivar severity_level: Severity level. Mostly used to indicate exception severity level when it - is reported by logging library. Possible values include: "Verbose", "Information", "Warning", - "Error", "Critical". - :vartype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :ivar problem_id: Identifier of where the exception was thrown in code. Used for exceptions - grouping. Typically a combination of exception type and a function from the call stack. - :vartype problem_id: str - :ivar properties: Collection of custom properties. - :vartype properties: dict[str, str] - :ivar measurements: Collection of custom measurements. - :vartype measurements: dict[str, float] - """ - - _validation = { - "version": {"required": True}, - "exceptions": {"required": True}, - "problem_id": {"max_length": 1024, "min_length": 0}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "version": {"key": "ver", "type": "int"}, - "exceptions": {"key": "exceptions", "type": "[TelemetryExceptionDetails]"}, - "severity_level": {"key": "severityLevel", "type": "str"}, - "problem_id": {"key": "problemId", "type": "str"}, - "properties": {"key": "properties", "type": "{str}"}, - "measurements": {"key": "measurements", "type": "{float}"}, - } - - def __init__( - self, - *, - version: int = 2, - exceptions: List["TelemetryExceptionDetails"], - additional_properties: Optional[Dict[str, Any]] = None, - severity_level: Optional[Union[str, "SeverityLevel"]] = None, - problem_id: Optional[str] = None, - properties: Optional[Dict[str, str]] = None, - measurements: Optional[Dict[str, float]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword version: Required. 
Schema version. - :paramtype version: int - :keyword exceptions: Required. Exception chain - list of inner exceptions. - :paramtype exceptions: list[~azure_monitor_client.models.TelemetryExceptionDetails] - :keyword severity_level: Severity level. Mostly used to indicate exception severity level when - it is reported by logging library. Possible values include: "Verbose", "Information", - "Warning", "Error", "Critical". - :paramtype severity_level: str or ~azure_monitor_client.models.SeverityLevel - :keyword problem_id: Identifier of where the exception was thrown in code. Used for exceptions - grouping. Typically a combination of exception type and a function from the call stack. - :paramtype problem_id: str - :keyword properties: Collection of custom properties. - :paramtype properties: dict[str, str] - :keyword measurements: Collection of custom measurements. - :paramtype measurements: dict[str, float] - """ - super(TelemetryExceptionData, self).__init__( - additional_properties=additional_properties, version=version, **kwargs - ) - self.exceptions = exceptions - self.severity_level = severity_level - self.problem_id = problem_id - self.properties = properties - self.measurements = measurements - - -class TelemetryExceptionDetails(msrest.serialization.Model): - """Exception details of the exception in a chain. - - All required parameters must be populated in order to send to Azure. - - :ivar id: In case exception is nested (outer exception contains inner one), the id and outerId - properties are used to represent the nesting. - :vartype id: int - :ivar outer_id: The value of outerId is a reference to an element in ExceptionDetails that - represents the outer exception. - :vartype outer_id: int - :ivar type_name: Exception type name. - :vartype type_name: str - :ivar message: Required. Exception message. - :vartype message: str - :ivar has_full_stack: Indicates if full exception stack is provided in the exception. 
The stack - may be trimmed, such as in the case of a StackOverflow exception. - :vartype has_full_stack: bool - :ivar stack: Text describing the stack. Either stack or parsedStack should have a value. - :vartype stack: str - :ivar parsed_stack: List of stack frames. Either stack or parsedStack should have a value. - :vartype parsed_stack: list[~azure_monitor_client.models.StackFrame] - """ - - _validation = { - "type_name": {"max_length": 1024, "min_length": 0}, - "message": {"required": True, "max_length": 32768, "min_length": 0}, - "stack": {"max_length": 32768, "min_length": 0}, - } - - _attribute_map = { - "id": {"key": "id", "type": "int"}, - "outer_id": {"key": "outerId", "type": "int"}, - "type_name": {"key": "typeName", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "has_full_stack": {"key": "hasFullStack", "type": "bool"}, - "stack": {"key": "stack", "type": "str"}, - "parsed_stack": {"key": "parsedStack", "type": "[StackFrame]"}, - } - - def __init__( - self, - *, - message: str, - id: Optional[int] = None, - outer_id: Optional[int] = None, - type_name: Optional[str] = None, - has_full_stack: Optional[bool] = True, - stack: Optional[str] = None, - parsed_stack: Optional[List["StackFrame"]] = None, - **kwargs - ): - """ - :keyword id: In case exception is nested (outer exception contains inner one), the id and - outerId properties are used to represent the nesting. - :paramtype id: int - :keyword outer_id: The value of outerId is a reference to an element in ExceptionDetails that - represents the outer exception. - :paramtype outer_id: int - :keyword type_name: Exception type name. - :paramtype type_name: str - :keyword message: Required. Exception message. - :paramtype message: str - :keyword has_full_stack: Indicates if full exception stack is provided in the exception. The - stack may be trimmed, such as in the case of a StackOverflow exception. - :paramtype has_full_stack: bool - :keyword stack: Text describing the stack. 
Either stack or parsedStack should have a value. - :paramtype stack: str - :keyword parsed_stack: List of stack frames. Either stack or parsedStack should have a value. - :paramtype parsed_stack: list[~azure_monitor_client.models.StackFrame] - """ - super(TelemetryExceptionDetails, self).__init__(**kwargs) - self.id = id - self.outer_id = outer_id - self.type_name = type_name - self.message = message - self.has_full_stack = has_full_stack - self.stack = stack - self.parsed_stack = parsed_stack - - -class TelemetryItem(msrest.serialization.Model): - """System variables for a telemetry item. - - All required parameters must be populated in order to send to Azure. - - :ivar version: Envelope version. For internal use only. By assigning this the default, it will - not be serialized within the payload unless changed to a value other than #1. - :vartype version: int - :ivar name: Required. Type name of telemetry data item. - :vartype name: str - :ivar time: Required. Event date time when telemetry item was created. This is the wall clock - time on the client when the event was generated. There is no guarantee that the client's time - is accurate. This field must be formatted in UTC ISO 8601 format, with a trailing 'Z' - character, as described publicly on https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the - number of decimal seconds digits provided are variable (and unspecified). Consumers should - handle this, i.e. managed code consumers should not use format 'O' for parsing as it specifies - a fixed length. Example: 2009-06-15T13:45:30.0000000Z. - :vartype time: ~datetime.datetime - :ivar sample_rate: Sampling rate used in application. This telemetry item represents 100 / - sampleRate actual telemetry items. - :vartype sample_rate: float - :ivar sequence: Sequence field used to track absolute order of uploaded events. - :vartype sequence: str - :ivar instrumentation_key: The instrumentation key of the Application Insights resource. 
- :vartype instrumentation_key: str - :ivar tags: A set of tags. Key/value collection of context properties. See ContextTagKeys for - information on available properties. - :vartype tags: dict[str, str] - :ivar data: Telemetry data item. - :vartype data: ~azure_monitor_client.models.MonitorBase - """ - - _validation = { - "name": {"required": True}, - "time": {"required": True}, - "sequence": {"max_length": 64, "min_length": 0}, - } - - _attribute_map = { - "version": {"key": "ver", "type": "int"}, - "name": {"key": "name", "type": "str"}, - "time": {"key": "time", "type": "iso-8601"}, - "sample_rate": {"key": "sampleRate", "type": "float"}, - "sequence": {"key": "seq", "type": "str"}, - "instrumentation_key": {"key": "iKey", "type": "str"}, - "tags": {"key": "tags", "type": "{str}"}, - "data": {"key": "data", "type": "MonitorBase"}, - } - - def __init__( - self, - *, - name: str, - time: datetime.datetime, - version: Optional[int] = 1, - sample_rate: Optional[float] = 100, - sequence: Optional[str] = None, - instrumentation_key: Optional[str] = None, - tags: Optional[Dict[str, str]] = None, - data: Optional["MonitorBase"] = None, - **kwargs - ): - """ - :keyword version: Envelope version. For internal use only. By assigning this the default, it - will not be serialized within the payload unless changed to a value other than #1. - :paramtype version: int - :keyword name: Required. Type name of telemetry data item. - :paramtype name: str - :keyword time: Required. Event date time when telemetry item was created. This is the wall - clock time on the client when the event was generated. There is no guarantee that the client's - time is accurate. This field must be formatted in UTC ISO 8601 format, with a trailing 'Z' - character, as described publicly on https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the - number of decimal seconds digits provided are variable (and unspecified). Consumers should - handle this, i.e. 
managed code consumers should not use format 'O' for parsing as it specifies - a fixed length. Example: 2009-06-15T13:45:30.0000000Z. - :paramtype time: ~datetime.datetime - :keyword sample_rate: Sampling rate used in application. This telemetry item represents 100 / - sampleRate actual telemetry items. - :paramtype sample_rate: float - :keyword sequence: Sequence field used to track absolute order of uploaded events. - :paramtype sequence: str - :keyword instrumentation_key: The instrumentation key of the Application Insights resource. - :paramtype instrumentation_key: str - :keyword tags: A set of tags. Key/value collection of context properties. See ContextTagKeys - for information on available properties. - :paramtype tags: dict[str, str] - :keyword data: Telemetry data item. - :paramtype data: ~azure_monitor_client.models.MonitorBase - """ - super(TelemetryItem, self).__init__(**kwargs) - self.version = version - self.name = name - self.time = time - self.sample_rate = sample_rate - self.sequence = sequence - self.instrumentation_key = instrumentation_key - self.tags = tags - self.data = data - - -class TrackResponse(msrest.serialization.Model): - """Response containing the status of each telemetry item. - - :ivar items_received: The number of items received. - :vartype items_received: int - :ivar items_accepted: The number of items accepted. - :vartype items_accepted: int - :ivar errors: An array of error detail objects. 
- :vartype errors: list[~azure_monitor_client.models.TelemetryErrorDetails] - """ - - _attribute_map = { - "items_received": {"key": "itemsReceived", "type": "int"}, - "items_accepted": {"key": "itemsAccepted", "type": "int"}, - "errors": {"key": "errors", "type": "[TelemetryErrorDetails]"}, - } - - def __init__( # type: ignore - self, - *, - items_received: Optional[int] = None, - items_accepted: Optional[int] = None, - errors: Optional[List["TelemetryErrorDetails"]] = None, - **kwargs # type: Any - ): - """ - :keyword items_received: The number of items received. - :paramtype items_received: int - :keyword items_accepted: The number of items accepted. - :paramtype items_accepted: int - :keyword errors: An array of error detail objects. - :paramtype errors: list[~azure_monitor_client.models.TelemetryErrorDetails] - """ - super(TrackResponse, self).__init__(**kwargs) - self.items_received = items_received - self.items_accepted = items_accepted - self.errors = errors diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/operations/_azure_monitor_client_operations.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/operations/_azure_monitor_client_operations.py deleted file mode 100644 index 6bc3c37e9233..000000000000 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/operations/_azure_monitor_client_operations.py +++ /dev/null @@ -1,140 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING - -from msrest import Serializer - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest - -from .. import models as _models -from .._vendor import _convert_request - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, List, Optional, TypeVar - - T = TypeVar("T") - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False -# fmt: off - -def build_track_request( - **kwargs # type: Any -): - # type: (...) -> HttpRequest - content_type = kwargs.pop('content_type', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "/track") - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if content_type is not None: - _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="POST", - url=_url, - headers=_header_parameters, - **kwargs - ) - - -# fmt: on -class AzureMonitorClientOperationsMixin(object): - - def track( - self, - body, # type: List["_models.TelemetryItem"] - **kwargs # type: Any - ): - # type: (...) -> "_models.TrackResponse" - """Track telemetry events. - - This operation sends a sequence of telemetry events that will be monitored by Azure Monitor. - - :param body: The list of telemetry events to track. 
- :type body: list[~azure_monitor_client.models.TelemetryItem] - :keyword callable cls: A custom type or function that will be passed the direct response - :return: TrackResponse, or the result of cls(response) - :rtype: ~azure_monitor_client.models.TrackResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop("cls", None) # type: ClsType["_models.TrackResponse"] - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 400: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 402: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 429: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 500: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - 503: lambda response: HttpResponseError( - response=response, model=self._deserialize(_models.TrackResponse, response) - ), - } - error_map.update(kwargs.pop("error_map", {})) - - content_type = kwargs.pop("content_type", "application/json") # type: Optional[str] - - _json = self._serialize.body(body, "[TelemetryItem]") - - request = build_track_request( - content_type=content_type, - json=_json, - template_url=self.track.metadata["url"], - ) - request = _convert_request(request) - path_format_arguments = { - "Host": self._serialize.url("self._config.host", self._config.host, "str", skip_quote=True), - } - request.url = self._client.format_url(request.url, **path_format_arguments) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, stream=False, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise HttpResponseError(response=response) - - if response.status_code == 200: - deserialized = self._deserialize("TrackResponse", pipeline_response) - - if response.status_code == 206: - deserialized = self._deserialize("TrackResponse", pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - - track.metadata = {"url": "/track"} # type: ignore diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/tsp-location.yaml b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/tsp-location.yaml new file mode 100644 index 000000000000..3f92798dc31e --- /dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_generated/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/applicationinsights/ApplicationInsights.Monitor.Exporters +commit: d131fceec1b73ddcb6b117ad8748c9d2fe71ca46 +repo: pvaneck/azure-rest-api-specs +additionalDirectories: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/__init__.py index a66a196ba3d3..8e3fd5c5a932 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/__init__.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/__init__.py @@ -5,4 +5,4 @@ enable_performance_counters, ) -__all__ = [ "enable_performance_counters" ] +__all__ = ["enable_performance_counters"] diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_constants.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_constants.py index a009fe88a3f9..a60155775654 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_constants.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_constants.py @@ -2,41 +2,35 @@ # Licensed under the MIT License. # (OpenTelemetry metric name, Breeze metric name) -_AVAILABLE_MEMORY = ( - "azuremonitor.performancecounter.memoryavailablebytes", - "\\Memory\\Available Bytes" -) +_AVAILABLE_MEMORY = ("azuremonitor.performancecounter.memoryavailablebytes", "\\Memory\\Available Bytes") _EXCEPTION_RATE = ( "azuremonitor.performancecounter.exceptionssec", - "\\.NET CLR Exceptions(??APP_CLR_PROC??)\\# of Exceps Thrown / sec" + "\\.NET CLR Exceptions(??APP_CLR_PROC??)\\# of Exceps Thrown / sec", ) _REQUEST_EXECUTION_TIME = ( "azuremonitor.performancecounter.requestexecutiontime", - "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Request Execution Time" + "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Request Execution Time", ) _REQUEST_RATE = ( "azuremonitor.performancecounter.requestssec", - "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec" -) -_PROCESS_CPU = ( - "azuremonitor.performancecounter.processtime", - "\\Process(??APP_WIN32_PROC??)\\% Processor Time" + "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec", ) +_PROCESS_CPU = ("azuremonitor.performancecounter.processtime", "\\Process(??APP_WIN32_PROC??)\\% Processor Time") _PROCESS_CPU_NORMALIZED = ( "azuremonitor.performancecounter.processtimenormalized", - "\\Process(??APP_WIN32_PROC??)\\% Processor Time Normalized" + "\\Process(??APP_WIN32_PROC??)\\% Processor Time Normalized", ) _PROCESS_IO_RATE = ( "azuremonitor.performancecounter.processiobytessec", - "\\Process(??APP_WIN32_PROC??)\\IO Data Bytes/sec" + "\\Process(??APP_WIN32_PROC??)\\IO Data 
Bytes/sec", ) _PROCESS_PRIVATE_BYTES = ( "azuremonitor.performancecounter.processprivatebytes", - "\\Process(??APP_WIN32_PROC??)\\Private Bytes" + "\\Process(??APP_WIN32_PROC??)\\Private Bytes", ) _PROCESSOR_TIME = ( "azuremonitor.performancecounter.processortotalprocessortime", - "\\Processor(_Total)\\% Processor Time" + "\\Processor(_Total)\\% Processor Time", ) _PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS = dict( diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py index 34d53c376a39..a09ad4ab0579 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_manager.py @@ -68,7 +68,7 @@ def _get_process_cpu(options: CallbackOptions) -> Iterable[Observation]: In the case of a process running on multiple threads on different CPU cores, the returned value can be > 100.0. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Process CPU usage percentage observations. @@ -90,7 +90,7 @@ def _get_process_cpu_normalized(options: CallbackOptions) -> Iterable[Observatio In the case of a process running on multiple threads on different CPU cores, the returned value can be > 100.0. We normalize the CPU process usage using the number of logical CPUs. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Normalized process CPU usage percentage observations. 
@@ -120,7 +120,7 @@ def _get_available_memory(options: CallbackOptions) -> Iterable[Observation]: Available memory is defined as memory that can be given instantly to processes without the system going into swap. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Available memory in bytes observations. @@ -141,7 +141,7 @@ def _get_process_memory(options: CallbackOptions) -> Iterable[Observation]: Private bytes for the current process is measured by the Resident Set Size, which is the non-swapped physical memory a process has used. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Process memory usage in bytes observations. @@ -189,9 +189,10 @@ def _get_process_io(options: CallbackOptions) -> Iterable[Observation]: _logger.exception("Error getting process I/O rate: %s", e) yield Observation(0, {}) + def _get_cpu_times_total(cpu_times): """Calculate total CPU time from CPU times structure. - + :param cpu_times: CPU times structure from psutil. :type cpu_times: psutil._common.scputimes :returns: Total CPU time. @@ -226,7 +227,7 @@ def _get_processor_time(options: CallbackOptions) -> Iterable[Observation]: Processor time is defined as the current system-wide CPU utilization minus idle CPU time as a percentage. Return values range from 0.0 to 100.0. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: System-wide CPU utilization percentage observations. 
@@ -240,7 +241,7 @@ def _get_processor_time(options: CallbackOptions) -> Iterable[Observation]: last_total = _get_cpu_times_total(_LAST_CPU_TIMES) idle_d = cpu_times.idle - _LAST_CPU_TIMES.idle total_d = total - last_total - utilization_percentage = 100*(total_d - idle_d)/total_d + utilization_percentage = 100 * (total_d - idle_d) / total_d _LAST_CPU_TIMES = cpu_times yield Observation(utilization_percentage, {}) except Exception as e: # pylint: disable=broad-except @@ -251,7 +252,7 @@ def _get_processor_time(options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument def _get_request_rate(options: CallbackOptions) -> Iterable[Observation]: """Get request rate in requests per second. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Request rate in requests per second observations. @@ -276,7 +277,7 @@ def _get_request_rate(options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument def _get_exception_rate(options: CallbackOptions) -> Iterable[Observation]: """Get exception rate in exceptions per second. - + :param options: Callback options for OpenTelemetry observable gauge. :type options: ~opentelemetry.metrics.CallbackOptions :returns: Exception rate in exceptions per second observations. @@ -313,13 +314,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter available memory in bytes", unit="byte", - callbacks=[_get_available_memory] + callbacks=[_get_available_memory], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -341,13 +342,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter exceptions per second", unit="exc/sec", - callbacks=[_get_exception_rate] + callbacks=[_get_exception_rate], ) @property def gauge(self): """Get the underlying gauge. 
- + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -374,7 +375,7 @@ def __init__(self, meter): @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry histogram instance. :rtype: ~opentelemetry.metrics.Histogram """ @@ -396,13 +397,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter requests per second", unit="req/sec", - callbacks=[_get_request_rate] + callbacks=[_get_request_rate], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -427,13 +428,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter process cpu usage as a percentage", unit="percent", - callbacks=[_get_process_cpu] + callbacks=[_get_process_cpu], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -457,15 +458,15 @@ def __init__(self, meter): self._gauge = meter.create_observable_gauge( name=self.NAME[0], description="performance counter process cpu usage as a percentage " - "divided by the number of total processors.", + "divided by the number of total processors.", unit="percent", - callbacks=[_get_process_cpu_normalized] + callbacks=[_get_process_cpu_normalized], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -487,13 +488,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter rate of I/O operations per second", unit="byte/sec", - callbacks=[_get_process_io] + callbacks=[_get_process_io], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. 
:rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -515,13 +516,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter amount of memory process has used in bytes", unit="byte", - callbacks=[_get_process_memory] + callbacks=[_get_process_memory], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -543,13 +544,13 @@ def __init__(self, meter): name=self.NAME[0], description="performance counter processor time as a percentage", unit="percent", - callbacks=[_get_processor_time] + callbacks=[_get_processor_time], ) @property def gauge(self): """Get the underlying gauge. - + :returns: The OpenTelemetry observable gauge instance. :rtype: ~opentelemetry.metrics.ObservableGauge """ @@ -577,7 +578,7 @@ class _PerformanceCountersManager(metaclass=Singleton): def __init__(self, meter_provider=None): """Initialize the performance counters manager. - + :param meter_provider: OpenTelemetry meter provider, if None uses global provider. 
:type meter_provider: ~opentelemetry.metrics.MeterProvider or None """ @@ -589,9 +590,7 @@ def __init__(self, meter_provider=None): if meter_provider is None: meter_provider = metrics.get_meter_provider() - self._meter = meter_provider.get_meter( - "azure.monitor.opentelemetry.performance_counters" - ) + self._meter = meter_provider.get_meter("azure.monitor.opentelemetry.performance_counters") # Initialize all performance counter metrics for metric_class in PERFORMANCE_COUNTER_METRICS: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py index d629535f4d5b..ed2964447a1a 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_performance_counters/_processor.py @@ -11,7 +11,7 @@ class _PerformanceCountersLogRecordProcessor(LogRecordProcessor): def __init__(self): super().__init__() - self.call_on_emit = hasattr(super(), 'on_emit') + self.call_on_emit = hasattr(super(), "on_emit") def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # type: ignore # pylint: disable=arguments-renamed pcm = _PerformanceCountersManager() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_constants.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_constants.py index 1f01369bef1d..c75c5e0207d3 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_constants.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_constants.py @@ -12,7 +12,7 @@ _PROCESSOR_TIME_NAME = 
("azuremonitor.quickpulse.processortotalprocessortime", "\\Processor(_Total)\\% Processor Time") _PROCESS_TIME_NORMALIZED_NAME = ( "azuremonitor.quickpulse.processtimenormalized", - "\\% Process\\Processor Time Normalized" + "\\% Process\\Processor Time Normalized", ) # Request _REQUEST_RATE_NAME = ("azuremonitor.quickpulse.requestssec", "\\ApplicationInsights\\Requests/Sec") @@ -26,7 +26,7 @@ ) _DEPENDENCY_DURATION_NAME = ( "azuremonitor.quickpulse.dependencycallduration", - "\\ApplicationInsights\\Dependency Call Duration" + "\\ApplicationInsights\\Dependency Call Duration", ) # Exception _EXCEPTION_RATE_NAME = ("azuremonitor.quickpulse.exceptionssec", "\\ApplicationInsights\\Exceptions/Sec") diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_cpu.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_cpu.py index 08dce9d09fa6..b8ab93b8b76c 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_cpu.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_cpu.py @@ -60,4 +60,5 @@ def _get_process_time_normalized_old(options: CallbackOptions) -> Iterable[Obser def _get_process_time_normalized(options: CallbackOptions) -> Iterable[Observation]: yield Observation(_get_quickpulse_last_process_cpu(), {}) + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_exporter.py index bae94096dc99..80ce67f6659f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_exporter.py @@ -184,7 +184,9 @@ def export( 
try: _update_filter_configuration(etag, config) except Exception: # pylint: disable=broad-except - _logger.exception("Exception occurred while updating filter config.") # pylint: disable=C4769 + _logger.exception( + "Exception occurred while updating filter config." + ) # pylint: disable=C4769 result = MetricExportResult.FAILURE except Exception: # pylint: disable=broad-except _logger.exception("Exception occurred while publishing live metrics.") # pylint: disable=C4769 @@ -302,7 +304,9 @@ def _ticker(self) -> None: # Reset etag to default if not subscribed _set_quickpulse_etag("") except Exception: # pylint: disable=broad-except - _logger.exception("Exception occurred while reading live metrics ping response.") # pylint: disable=C4769 + _logger.exception( + "Exception occurred while reading live metrics ping response." + ) # pylint: disable=C4769 _set_quickpulse_etag("") # TODO: Implement redirect else: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_live_metrics.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_live_metrics.py index 06af99bad203..b9d7b76e4e64 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_live_metrics.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_live_metrics.py @@ -62,10 +62,7 @@ def get_quickpulse_configuration_callback(settings: Dict[str, str]) -> None: manager = get_quickpulse_manager() # Check if live metrics should be enabled based on configuration - live_metrics_enabled = evaluate_feature( - _ONE_SETTINGS_FEATURE_LIVE_METRICS, - settings - ) + live_metrics_enabled = evaluate_feature(_ONE_SETTINGS_FEATURE_LIVE_METRICS, settings) if live_metrics_enabled and not manager.is_initialized(): # Enable live metrics if it's not currently enabled @@ -74,7 +71,7 @@ def 
get_quickpulse_configuration_callback(settings: Dict[str, str]) -> None: manager.initialize( connection_string=manager._connection_string, # pylint:disable=protected-access credential=manager._credential, # pylint:disable=protected-access - resource=manager._resource # pylint:disable=protected-access + resource=manager._resource, # pylint:disable=protected-access ) elif live_metrics_enabled is False and manager.is_initialized(): # Disable live metrics if it's currently enabled diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py index e6d5c8aa4077..ee6aed1e89bb 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_manager.py @@ -17,7 +17,7 @@ from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import SpanKind -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys from azure.monitor.opentelemetry.exporter._quickpulse._constants import ( _COMMITTED_BYTES_NAME, _DEPENDENCY_DURATION_NAME, @@ -388,19 +388,22 @@ def _validate_recording_resources(self) -> bool: :return: True if all required resources are available, False otherwise :rtype: bool """ - return all([ - self._request_rate_counter is not None, - self._request_failed_rate_counter is not None, - self._request_duration is not None, - self._dependency_rate_counter is not None, - self._dependency_failure_rate_counter is not None, - self._dependency_duration is not None, - self._exception_rate_counter is not None, - ]) + return all( + [ + self._request_rate_counter is not None, + self._request_failed_rate_counter is not 
None, + self._request_duration is not None, + self._dependency_rate_counter is not None, + self._dependency_failure_rate_counter is not None, + self._dependency_duration is not None, + self._exception_rate_counter is not None, + ] + ) # Filtering + # Called by record_span/record_log when processing a span/log_record for metrics filtering # Derives metrics from projections if applicable to current filters in config def _derive_metrics_from_telemetry_data(data: _TelemetryData): @@ -426,7 +429,9 @@ def _derive_metrics_from_telemetry_data(data: _TelemetryData): # Called by record_span/record_log when processing a span/log_record for docs filtering # Finds doc stream Ids and their doc filter configurations def _apply_document_filters_from_telemetry_data(data: _TelemetryData, exc_type: Optional[str] = None): - doc_config_dict: Dict[TelemetryType, Dict[str, List[FilterConjunctionGroupInfo]]] = _get_quickpulse_doc_stream_infos() # pylint: disable=C0301 + doc_config_dict: Dict[TelemetryType, Dict[str, List[FilterConjunctionGroupInfo]]] = ( + _get_quickpulse_doc_stream_infos() + ) # pylint: disable=C0301 stream_ids = set() doc_config = {} # type: ignore if isinstance(data, _RequestData): @@ -462,4 +467,5 @@ def _apply_document_filters_from_telemetry_data(data: _TelemetryData, exc_type: # Add the generated document to be sent to quickpulse _append_quickpulse_document(document) + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py index ac06802c3327..a91393c120bd 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_processor.py @@ -11,7 +11,7 @@ class _QuickpulseLogRecordProcessor(LogRecordProcessor): 
def __init__(self): super().__init__() - self.call_on_emit = hasattr(super(), 'on_emit') + self.call_on_emit = hasattr(super(), "on_emit") def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # type: ignore # pylint: disable=arguments-renamed qpm = get_quickpulse_manager() @@ -23,7 +23,7 @@ def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # type: igno # this method was removed in opentelemetry-sdk and replaced with on_emit super().emit(readable_log_record) # type: ignore[safe-super,misc] # pylint: disable=no-member - def emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed + def emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed self.on_emit(readable_log_record) def shutdown(self): diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_state.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_state.py index 20e3eadbd9a5..ae05af47f6e9 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_state.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_state.py @@ -49,6 +49,7 @@ class _QuickpulseState(Enum): # Global singleton instance for easy access throughout the codebase _quickpulse_manager = None + def get_quickpulse_manager() -> "_QuickpulseManager": """Get the global Quickpulse Manager singleton instance. 
@@ -60,6 +61,7 @@ def get_quickpulse_manager() -> "_QuickpulseManager": global _quickpulse_manager # pylint: disable=global-statement if _quickpulse_manager is None: from azure.monitor.opentelemetry.exporter._quickpulse._manager import _QuickpulseManager + _quickpulse_manager = _QuickpulseManager() return _quickpulse_manager diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py index fe0b0e94ec2f..bac39d33976c 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_types.py @@ -59,8 +59,7 @@ def _from_span(span: ReadableSpan): if span.attributes: attributes = span.attributes url = trace_utils._get_url_for_http_request(attributes) - status_code = attributes.get(HTTP_RESPONSE_STATUS_CODE) or \ - attributes.get(SpanAttributes.HTTP_STATUS_CODE) + status_code = attributes.get(HTTP_RESPONSE_STATUS_CODE) or attributes.get(SpanAttributes.HTTP_STATUS_CODE) if status_code: try: status_code = int(status_code) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_utils.py index fa6bcb9d4f4d..936c0e3c324f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_utils.py @@ -111,7 +111,9 @@ def _get_span_document(data: Union[_DependencyData, _RequestData]) -> Union[Remo # mypy: disable-error-code="assignment" -def _get_log_record_document(data: Union[_ExceptionData, _TraceData], exc_type: Optional[str] = 
None) -> Union[ExceptionDocument, TraceDocument]: # pylint: disable=C0301 +def _get_log_record_document( + data: Union[_ExceptionData, _TraceData], exc_type: Optional[str] = None +) -> Union[ExceptionDocument, TraceDocument]: # pylint: disable=C0301 if isinstance(data, _ExceptionData): document = ExceptionDocument( document_type=DocumentType.EXCEPTION, @@ -148,6 +150,7 @@ def _get_metrics_from_projections() -> List[Tuple[str, float]]: # Time + def _ms_to_iso8601_string(ms: float) -> str: seconds, ms = divmod(ms, 1000) minutes, seconds = divmod(seconds, 60) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_validate.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_validate.py index 6d760117a659..efd3f689538f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_validate.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_quickpulse/_validate.py @@ -26,8 +26,9 @@ def _validate_derived_metric_info(metric_info: DerivedMetricInfo) -> bool: for filter in filter_group.filters: # Validate field names to telemetry type # Validate predicate and comparands - if not _validate_filter_field_name(filter.field_name, metric_info.telemetry_type) or not \ - _validate_filter_predicate_and_comparand(filter): + if not _validate_filter_field_name( + filter.field_name, metric_info.telemetry_type + ) or not _validate_filter_predicate_and_comparand(filter): return False return True @@ -39,14 +40,15 @@ def _validate_document_filter_group_info(doc_filter_group: DocumentFilterConjunc for filter in doc_filter_group.filters.filters: # Validate field names to telemetry type # Validate predicate and comparands - if not _validate_filter_field_name(filter.field_name, doc_filter_group.telemetry_type) or not \ - _validate_filter_predicate_and_comparand(filter): + if not 
_validate_filter_field_name( + filter.field_name, doc_filter_group.telemetry_type + ) or not _validate_filter_predicate_and_comparand(filter): return False return True def _validate_telemetry_type(telemetry_type: str) -> bool: - # Validate telemetry type + # Validate telemetry type try: telemetry_type = TelemetryType(telemetry_type) except Exception: # pylint: disable=broad-except diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_storage.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_storage.py index 74590564b0d1..0f2ac4ab8bf0 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_storage.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_storage.py @@ -22,9 +22,8 @@ logger = logging.getLogger(__name__) -ICACLS_PATH = os.path.join( - os.environ.get("SYSTEMDRIVE", "C:"), r"\Windows\System32\icacls.exe" -) +ICACLS_PATH = os.path.join(os.environ.get("SYSTEMDRIVE", "C:"), r"\Windows\System32\icacls.exe") + def _fmt(timestamp: datetime.datetime) -> str: return timestamp.strftime("%Y-%m-%dT%H%M%S.%f") @@ -37,12 +36,14 @@ def _now() -> datetime.datetime: def _seconds(seconds: int) -> datetime.timedelta: return datetime.timedelta(seconds=seconds) + class StorageExportResult(Enum): LOCAL_FILE_BLOB_SUCCESS = 0 CLIENT_STORAGE_DISABLED = 1 CLIENT_PERSISTENCE_CAPACITY_REACHED = 2 CLIENT_READONLY = 3 + # pylint: disable=broad-except class LocalFileBlob: def __init__(self, fullpath: str) -> None: @@ -80,7 +81,7 @@ def put(self, data: List[Any], lease_period: int = 0) -> Union[StorageExportResu except Exception as ex: return str(ex) - def lease(self, period: int) -> Optional['LocalFileBlob']: + def lease(self, period: int) -> Optional["LocalFileBlob"]: timestamp = _now() + _seconds(period) fullpath: str = self.fullpath if fullpath.endswith(".lock"): @@ -129,15 +130,12 @@ def close(self) -> None: 
self._maintenance_task.cancel() self._maintenance_task.join() - def __enter__(self) -> 'LocalFileStorage': + def __enter__(self) -> "LocalFileStorage": return self # pylint: disable=redefined-builtin def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[Any] + self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[Any] ) -> None: self.close() @@ -225,7 +223,6 @@ def put(self, data: List[Any], lease_period: Optional[int] = None) -> Union[Stor except Exception as ex: return str(ex) - def _check_and_set_folder_permissions(self) -> bool: """ Validate and set folder permissions where the telemetry data will be stored. @@ -239,9 +236,7 @@ def _check_and_set_folder_permissions(self) -> bool: if os.name == "nt": user = self._get_current_user() if not user: - logger.warning( - "Failed to retrieve current user. Skipping folder permission setup." - ) + logger.warning("Failed to retrieve current user. Skipping folder permission setup.") return False result = subprocess.run( [ @@ -263,7 +258,7 @@ def _check_and_set_folder_permissions(self) -> bool: os.chmod(self._path, 0o700) return True except OSError as error: - if getattr(error, 'errno', None) == errno.EROFS: # cspell:disable-line + if getattr(error, "errno", None) == errno.EROFS: # cspell:disable-line set_local_storage_setup_state_readonly() else: set_local_storage_setup_state_exception(str(error)) @@ -293,9 +288,7 @@ def _check_storage_size(self) -> bool: "Persistent storage max capacity has been " "reached. Currently at {}KB. Telemetry will be " "lost. 
Please consider increasing the value of " - "'storage_max_size' in exporter config.".format( - str(size / 1024) - ) + "'storage_max_size' in exporter config.".format(str(size / 1024)) ) return False return True diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_utils.py index 48399297eaef..cb62c7157bca 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/_utils.py @@ -14,11 +14,10 @@ from opentelemetry.semconv.resource import ResourceAttributes from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk.util import ns_to_iso_str from opentelemetry.util.types import Attributes from azure.core.pipeline.policies import BearerTokenCredentialPolicy -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys, TelemetryItem +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys, TelemetryItem from azure.monitor.opentelemetry.exporter._version import VERSION as ext_version from azure.monitor.opentelemetry.exporter._constants import ( _AKS_ARM_NAMESPACE_ID, @@ -107,12 +106,14 @@ def _get_os(): os = "w" return os + def _get_attach_type(): attach_type = "m" if _is_attach_enabled(): attach_type = "i" return attach_type + def _get_sdk_version_prefix(): sdk_version_prefix = "" rp = _get_rp() @@ -234,11 +235,12 @@ def cancel(self): def _create_telemetry_item(timestamp: int) -> TelemetryItem: + ts = datetime.datetime.fromtimestamp(timestamp / 1e9, tz=datetime.timezone.utc) return TelemetryItem( name="", instrumentation_key="", tags=dict(azure_monitor_context), # type: ignore - time=ns_to_iso_str(timestamp), # type: ignore + time=ts, ) @@ -330,10 +332,9 @@ def _is_synthetic_load(properties: Optional[Any]) -> bool: return 
False # Check both old and new semantic convention attributes for HTTP user agent - user_agent = ( - properties.get("user_agent.original") or # type: ignore # New semantic convention - properties.get("http.user_agent") # type: ignore # Legacy semantic convention - ) + user_agent = properties.get("user_agent.original") or properties.get( # type: ignore # New semantic convention + "http.user_agent" + ) # type: ignore # Legacy semantic convention if user_agent and isinstance(user_agent, str): return "AlwaysOn" in user_agent @@ -392,10 +393,11 @@ def _get_scope(aad_audience=None): class Singleton(type): """Metaclass for creating thread-safe singleton instances. - + Supports multiple singleton classes by maintaining a separate instance for each class that uses this metaclass. """ + _instances = {} # type: ignore _lock = threading.Lock() @@ -408,6 +410,7 @@ def __call__(cls, *args: Any, **kwargs: Any) -> Any: cls._instances[cls] = instance # type: ignore return cls._instances[cls] + def _get_telemetry_type(item: TelemetryItem): if hasattr(item, "data") and item.data is not None: base_type = getattr(item.data, "base_type", None) @@ -415,6 +418,7 @@ def _get_telemetry_type(item: TelemetryItem): return _TYPE_MAP.get(base_type, _UNKNOWN) return _UNKNOWN + def get_compute_type(): if _is_on_functions(): return _RP_Names.FUNCTIONS.value @@ -424,5 +428,6 @@ def get_compute_type(): return _RP_Names.AKS.value return _RP_Names.UNKNOWN.value + def _get_sha256_hash(input_str: str) -> str: return hashlib.sha256(input_str.encode("utf-8")).hexdigest() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/_base.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/_base.py index 71b0cc64dcd3..8f69920096dc 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/_base.py +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/_base.py @@ -20,9 +20,11 @@ RequestIdPolicy, ) from azure.identity import ManagedIdentityCredential -from azure.monitor.opentelemetry.exporter._generated import AzureMonitorClient -from azure.monitor.opentelemetry.exporter._generated._configuration import AzureMonitorClientConfiguration -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter import AzureMonitorExporterClient +from azure.monitor.opentelemetry.exporter._generated.exporter._configuration import ( + AzureMonitorExporterClientConfiguration, +) +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( MessageData, MetricsData, MonitorDomain, @@ -150,7 +152,7 @@ def __init__(self, **kwargs: Any) -> None: # specifies whether current exporter is used for collection of instrumentation metrics self._instrumentation_collection = kwargs.get("instrumentation_collection", False) - config = AzureMonitorClientConfiguration(self._endpoint, **kwargs) + config = AzureMonitorExporterClientConfiguration(self._endpoint, **kwargs) policies = [ RequestIdPolicy(**kwargs), config.headers_policy, @@ -168,7 +170,7 @@ def __init__(self, **kwargs: Any) -> None: config.http_logging_policy or HttpLoggingPolicy(**kwargs), ] - self.client: AzureMonitorClient = AzureMonitorClient( + self.client: AzureMonitorExporterClient = AzureMonitorExporterClient( host=self._endpoint, connection_timeout=self._timeout, policies=policies, **kwargs ) # TODO: Uncomment configuration changes once testing is completed @@ -197,6 +199,7 @@ def __init__(self, **kwargs: Any) -> None: try: # Import here to avoid circular dependencies from azure.monitor.opentelemetry.exporter.statsbeat._statsbeat import collect_statsbeat_metrics + collect_statsbeat_metrics(self) except Exception as e: # pylint: disable=broad-except logger.warning("Failed to initialize statsbeat 
metrics: %s", e) @@ -205,6 +208,7 @@ def __init__(self, **kwargs: Any) -> None: if self._should_collect_customer_sdkstats(): from azure.monitor.opentelemetry.exporter.statsbeat.customer import collect_customer_sdkstats + # Collect customer sdkstats metrics collect_customer_sdkstats(self) @@ -217,7 +221,7 @@ def _transmit_from_storage(self) -> None: if blob.lease(self._timeout + 5): blob_data = blob.get() if blob_data is not None: - envelopes = [_format_storage_telemetry_item(TelemetryItem.from_dict(x)) for x in blob_data] + envelopes = [_format_storage_telemetry_item(TelemetryItem(x)) for x in blob_data] result = self._transmit(envelopes) if result == ExportResult.FAILED_RETRYABLE: blob.lease(1) @@ -227,7 +231,6 @@ def _transmit_from_storage(self) -> None: # If blob.get() returns None, delete the corrupted blob blob.delete() - def _handle_transmit_from_storage(self, envelopes: List[TelemetryItem], result: ExportResult) -> None: if self.storage: if result == ExportResult.FAILED_RETRYABLE: @@ -254,7 +257,7 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: Returns an ExportResult, this function should never throw an exception. :param envelopes: The list of telemetry items to transmit. - :type envelopes: list of ~azure.monitor.opentelemetry.exporter._generated.models.TelemetryItem + :type envelopes: list of ~azure.monitor.opentelemetry.exporter._generated.exporter.models.TelemetryItem :return: The result of the export. 
:rtype: ~azure.monitor.opentelemetry.exporter.export._base._ExportResult """ @@ -296,7 +299,12 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: if not self._is_stats_exporter(): # Track dropped items in customer sdkstats, non-retryable scenario if self._should_collect_customer_sdkstats(): - if error is not None and hasattr(error, "index") and error.index is not None and isinstance(error.status_code, int): + if ( + error is not None + and hasattr(error, "index") + and error.index is not None + and isinstance(error.status_code, int) + ): track_dropped_items([envelopes[error.index]], error.status_code) logger.error( "Data drop %s: %s %s.", @@ -330,17 +338,17 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: track_retry_items(envelopes, response_error) if response_error.status_code == 401: logger.error( - "Retryable server side error: %s. " \ - "Your Application Insights resource may be configured to use entra ID authentication. " \ + "Retryable server side error: %s. " + "Your Application Insights resource may be configured to use entra ID authentication. " "Please make sure your application is configured to use the correct token credential.", response_error.message, ) elif response_error.status_code == 403: logger.error( - "Retryable server side error: %s. " \ - "Your application may be configured with a token credential " \ - "but your Application Insights resource may be configured incorrectly. Please make sure " \ - "your Application Insights resource has enabled entra Id authentication and " \ + "Retryable server side error: %s. " + "Your application may be configured with a token credential " + "but your Application Insights resource may be configured incorrectly. 
Please make sure " + "your Application Insights resource has enabled entra Id authentication and " "has the correct `Monitoring Metrics Publisher` role assigned.", response_error.message, ) @@ -370,7 +378,11 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: else: if not self._is_stats_exporter(): if self._should_collect_customer_sdkstats(): - track_dropped_items(envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.CLIENT_EXCEPTION.value) + track_dropped_items( + envelopes, + DropCode.CLIENT_EXCEPTION, + _exception_categories.CLIENT_EXCEPTION.value, + ) logger.error( "Error parsing redirect information.", ) @@ -380,9 +392,7 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: # Track dropped items in customer sdkstats, non-retryable scenario if self._should_collect_customer_sdkstats(): track_dropped_items( - envelopes, - DropCode.CLIENT_EXCEPTION, - _exception_categories.CLIENT_EXCEPTION.value + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.CLIENT_EXCEPTION.value ) logger.error( "Error sending telemetry because of circular redirects. 
" @@ -437,11 +447,15 @@ def _transmit(self, envelopes: List[TelemetryItem]) -> ExportResult: _update_requests_map(_REQ_EXCEPTION_NAME[1], value=exc_type) result = ExportResult.FAILED_RETRYABLE except Exception as ex: - logger.exception("Envelopes could not be exported and are not retryable: %s.", ex) # pylint: disable=C4769 + logger.exception( + "Envelopes could not be exported and are not retryable: %s.", ex + ) # pylint: disable=C4769 # Track dropped items in customer sdkstats for general exceptions if self._should_collect_customer_sdkstats(): - track_dropped_items(envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.CLIENT_EXCEPTION.value) + track_dropped_items( + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.CLIENT_EXCEPTION.value + ) if self._should_collect_stats(): _update_requests_map(_REQ_EXCEPTION_NAME[1], value=ex.__class__.__name__) @@ -485,7 +499,6 @@ def _should_collect_stats(self): and not self._instrumentation_collection ) - # check to see whether its the case of customer sdkstats collection def _should_collect_customer_sdkstats(self): manager = get_customer_stats_manager() @@ -505,7 +518,8 @@ def _is_stats_exporter(self): return getattr(self, "_is_sdkstats", False) def _is_customer_sdkstats_exporter(self): - return getattr(self, '_is_customer_sdkstats', False) + return getattr(self, "_is_customer_sdkstats", False) + def _is_invalid_code(response_code: Optional[int]) -> bool: """Determine if response is a invalid response. 
@@ -567,22 +581,18 @@ def _reached_ingestion_code(response_code: Optional[int]) -> bool: } -# from_dict() deserializes incorrectly, format TelemetryItem correctly after it -# is called +# Set base_data to correct type if deserialized from storage def _format_storage_telemetry_item(item: TelemetryItem) -> TelemetryItem: - # After TelemetryItem.from_dict, all base_data fields are stored in - # additional_properties as a dict instead of in item.data.base_data itself - # item.data.base_data is also of type MonitorDomain instead of a child class + # item.data.base_data is of type MonitorDomain instead of a child class if hasattr(item, "data") and item.data is not None: if hasattr(item.data, "base_data") and isinstance(item.data.base_data, MonitorDomain): if hasattr(item.data, "base_type") and isinstance(item.data.base_type, str): base_type = _MONITOR_DOMAIN_MAPPING.get(item.data.base_type) - # Apply deserialization of additional_properties and store that as base_data if base_type: - item.data.base_data = base_type.from_dict(item.data.base_data.additional_properties) # type: ignore - item.data.base_data.additional_properties = None # type: ignore + item.data.base_data = base_type(item.data.base_data.as_dict()) return item + # mypy: disable-error-code="union-attr" def _get_authentication_credential(**kwargs: Any) -> Optional[ManagedIdentityCredential]: if "credential" in kwargs: @@ -600,11 +610,16 @@ def _get_authentication_credential(**kwargs: Any) -> Optional[ManagedIdentityCre credential = ManagedIdentityCredential() return credential except ValueError as exc: - logger.error("APPLICATIONINSIGHTS_AUTHENTICATION_STRING, %s, has invalid format: %s", auth_string, exc) # pylint: disable=do-not-log-exceptions-if-not-debug + logger.error( + "APPLICATIONINSIGHTS_AUTHENTICATION_STRING, %s, has invalid format: %s", auth_string, exc + ) # pylint: disable=do-not-log-exceptions-if-not-debug except Exception as e: - logger.error("Failed to get authentication credential and enable 
AAD: %s", e) # pylint: disable=do-not-log-exceptions-if-not-debug + logger.error( + "Failed to get authentication credential and enable AAD: %s", e + ) # pylint: disable=do-not-log-exceptions-if-not-debug return None + def _get_storage_directory(instrumentation_key: str) -> str: """Return the deterministic local storage path for a given instrumentation key. @@ -654,7 +669,7 @@ def _safe_psutil_call(func, default=""): instrumentation_key, user_segment, process_name, - os.fspath(application_directory), # cspell:disable-line + os.fspath(application_directory), # cspell:disable-line ] ) subdirectory = _get_sha256_hash(hash_input) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py index b1101f39597f..c539ad92a4d4 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_exporter.py @@ -20,7 +20,7 @@ _MESSAGE_ENVELOPE_NAME, _DEFAULT_LOG_MESSAGE, ) -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( ContextTagKeys, MessageData, MonitorBase, @@ -56,7 +56,9 @@ class AzureMonitorLogExporter(BaseExporter, LogRecordExporter): """Azure Monitor Log exporter for OpenTelemetry.""" - def export(self, batch: Sequence[ReadableLogRecord], **kwargs: Any) -> LogRecordExportResult: # pylint: disable=unused-argument + def export( + self, batch: Sequence[ReadableLogRecord], **kwargs: Any # pylint: disable=unused-argument + ) -> LogRecordExportResult: """Export log data. :param batch: OpenTelemetry ReadableLogRecord(s) to export. 
@@ -110,8 +112,9 @@ def _log_data_is_event(readable_log_record: ReadableLogRecord) -> bool: log_record = readable_log_record.log_record is_event = None if log_record.attributes: - is_event = log_record.attributes.get(_MICROSOFT_CUSTOM_EVENT_NAME) or \ - log_record.attributes.get(_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE) # type: ignore + is_event = log_record.attributes.get(_MICROSOFT_CUSTOM_EVENT_NAME) or log_record.attributes.get( + _APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE + ) # type: ignore return is_event is not None @@ -121,27 +124,28 @@ def _convert_log_to_envelope(readable_log_record: ReadableLogRecord) -> Telemetr log_record = readable_log_record.log_record time_stamp = log_record.timestamp if log_record.timestamp is not None else log_record.observed_timestamp envelope = _utils._create_telemetry_item(time_stamp) - envelope.tags.update(_utils._populate_part_a_fields(readable_log_record.resource)) # type: ignore - envelope.tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format( # type: ignore - log_record.trace_id or _DEFAULT_TRACE_ID - ) - envelope.tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format( # type: ignore - log_record.span_id or _DEFAULT_SPAN_ID - ) + + # Build tags dict all at once to avoid issues with descriptor creating new dicts on each access + tags = envelope.tags or {} + tags.update(_utils._populate_part_a_fields(readable_log_record.resource)) + tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format(log_record.trace_id or _DEFAULT_TRACE_ID) + tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format(log_record.span_id or _DEFAULT_SPAN_ID) if ( log_record.attributes and ContextTagKeys.AI_OPERATION_NAME in log_record.attributes and log_record.attributes[ContextTagKeys.AI_OPERATION_NAME] is not None ): - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = log_record.attributes.get( # type: ignore - ContextTagKeys.AI_OPERATION_NAME - ) + tags[ContextTagKeys.AI_OPERATION_NAME] = 
log_record.attributes.get(ContextTagKeys.AI_OPERATION_NAME) if _utils._is_any_synthetic_source(log_record.attributes): - envelope.tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" # type: ignore + tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" # Special use case: Customers want to be able to set location ip on log records location_ip = trace_utils._get_location_ip(log_record.attributes) if location_ip: - envelope.tags[ContextTagKeys.AI_LOCATION_IP] = location_ip # type: ignore + tags[ContextTagKeys.AI_LOCATION_IP] = location_ip + + # Assign the complete tags dict back to envelope + envelope.tags = tags + properties = _utils._filter_custom_properties( log_record.attributes, lambda key, val: not _is_ignored_attribute(key) # type: ignore ) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py index 52ac651eab2d..ccc024a2a542 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/logs/_processor.py @@ -26,9 +26,9 @@ def __init__( self._options = options or {} self._enable_trace_based_sampling_for_logs = self._options.get("enable_trace_based_sampling_for_logs") - def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed + def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: disable=arguments-renamed # cspell: disable - """ Determines whether the logger should drop log records associated with unsampled traces. + """Determines whether the logger should drop log records associated with unsampled traces. If `trace_based_sampling` is `true`, log records associated with unsampled traces are dropped by the `Logger`. 
A log record is considered associated with an unsampled trace if it has a valid `SpanId` and its `TraceFlags` indicate that the trace is unsampled. A log record that isn't associated with a trace @@ -41,7 +41,10 @@ def on_emit(self, readable_log_record: ReadableLogRecord) -> None: # pylint: dis # cspell: enable if self._enable_trace_based_sampling_for_logs: if hasattr(readable_log_record, "log_record") and readable_log_record.log_record is not None: - if hasattr(readable_log_record.log_record, "context") and readable_log_record.log_record.context is not None: # pylint: disable=line-too-long + if ( + hasattr(readable_log_record.log_record, "context") + and readable_log_record.log_record.context is not None + ): # pylint: disable=line-too-long span = get_current_span(readable_log_record.log_record.context) span_context = span.get_span_context() if span_context.is_valid and not span_context.trace_flags.sampled: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/metrics/_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/metrics/_exporter.py index f44adcc57cb3..2b81ccb817b9 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/metrics/_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/metrics/_exporter.py @@ -43,7 +43,7 @@ _STATSBEAT_METRIC_NAME_MAPPINGS, ) from azure.monitor.opentelemetry.exporter import _utils -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( ContextTagKeys, MetricDataPoint, MetricsData, @@ -56,7 +56,7 @@ ) from azure.monitor.opentelemetry.exporter.export.trace import _utils as trace_utils from azure.monitor.opentelemetry.exporter._performance_counters._constants import ( - _PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS + 
_PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS, ) _logger = logging.getLogger(__name__) @@ -176,13 +176,14 @@ def _point_to_envelope( envelope.instrumentation_key = self._instrumentation_key # Only set SentToAMW on AKS Attach if _utils._is_on_aks() and _utils._is_attach_enabled() and not self._is_stats_exporter(): - if ( - OTEL_EXPORTER_OTLP_METRICS_ENDPOINT in os.environ - and "otlp" in os.environ.get(OTEL_METRICS_EXPORTER, "") + properties = envelope.data.base_data.properties or {} # type: ignore + if OTEL_EXPORTER_OTLP_METRICS_ENDPOINT in os.environ and "otlp" in os.environ.get( + OTEL_METRICS_EXPORTER, "" ): - envelope.data.base_data.properties["_MS.SentToAMW"] = "True" # type: ignore + properties["_MS.SentToAMW"] = "True" # type: ignore else: - envelope.data.base_data.properties["_MS.SentToAMW"] = "False" # type: ignore + properties["_MS.SentToAMW"] = "False" # type: ignore + envelope.data.base_data.properties = properties # type: ignore return envelope @@ -231,9 +232,10 @@ def _convert_point_to_envelope( ) -> TelemetryItem: envelope = _utils._create_telemetry_item(point.time_unix_nano) envelope.name = _METRIC_ENVELOPE_NAME - envelope.tags.update(_utils._populate_part_a_fields(resource)) # type: ignore + tags = envelope.tags + tags.update(_utils._populate_part_a_fields(resource)) # type: ignore if _utils._is_any_synthetic_source(point.attributes): - envelope.tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" # type: ignore + tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" # type: ignore namespace = None if scope is not None and _is_metric_namespace_opted_in(): namespace = str(scope.name)[:256] @@ -274,6 +276,7 @@ def _convert_point_to_envelope( ) envelope.data = MonitorBase(base_data=data, base_type="MetricData") + envelope.tags = tags return envelope diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py index 8347b08f7d69..fb56cc6c3f59 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_exporter.py @@ -33,7 +33,7 @@ _REMOTE_DEPENDENCY_ENVELOPE_NAME, ) from azure.monitor.opentelemetry.exporter import _utils -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( ContextTagKeys, MessageData, MetricDataPoint, @@ -151,20 +151,21 @@ def _get_otel_resource_envelope(self, resource: Resource) -> TelemetryItem: if resource: attributes = resource.attributes envelope = _utils._create_telemetry_item(time_ns()) + tags = envelope.tags or {} envelope.name = _METRIC_ENVELOPE_NAME - envelope.tags.update(_utils._populate_part_a_fields(resource)) # pylint: disable=W0212 + tags.update(_utils._populate_part_a_fields(resource)) # pylint: disable=W0212 envelope.instrumentation_key = self._instrumentation_key data_point = MetricDataPoint( name="_OTELRESOURCE_"[:1024], value=0, ) - data = MetricsData( properties=attributes, metrics=[data_point], ) envelope.data = MonitorBase(base_data=data, base_type="MetricData") + envelope.tags = tags return envelope @@ -208,6 +209,7 @@ def from_connection_string(cls, conn_str: str, **kwargs: Any) -> "AzureMonitorTr # pylint: disable=too-many-statements # pylint: disable=too-many-branches # pylint: disable=protected-access +# pylint: disable=too-many-locals # mypy: disable-error-code="assignment,attr-defined,index,operator,union-attr" @no_type_check def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: @@ -220,14 +222,15 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: if span.end_time: duration = span.end_time - span.start_time envelope = 
_utils._create_telemetry_item(start_time) - envelope.tags.update(_utils._populate_part_a_fields(span.resource)) - envelope.tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format(span.context.trace_id) + tags = envelope.tags or {} + tags.update(_utils._populate_part_a_fields(span.resource)) + tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format(span.context.trace_id) if SpanAttributes.ENDUSER_ID in span.attributes: - envelope.tags[ContextTagKeys.AI_USER_ID] = span.attributes[SpanAttributes.ENDUSER_ID] + tags[ContextTagKeys.AI_USER_ID] = span.attributes[SpanAttributes.ENDUSER_ID] if _utils._is_any_synthetic_source(span.attributes): - envelope.tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" + tags[ContextTagKeys.AI_OPERATION_SYNTHETIC_SOURCE] = "True" if span.parent and span.parent.span_id: - envelope.tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format(span.parent.span_id) + tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format(span.parent.span_id) if span.kind in (SpanKind.CONSUMER, SpanKind.SERVER): envelope.name = _REQUEST_ENVELOPE_NAME data = RequestData( @@ -240,10 +243,10 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: measurements={}, ) envelope.data = MonitorBase(base_data=data, base_type="RequestData") - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = span.name + tags[ContextTagKeys.AI_OPERATION_NAME] = span.name location_ip = trace_utils._get_location_ip(span.attributes) if location_ip: - envelope.tags[ContextTagKeys.AI_LOCATION_IP] = location_ip + tags[ContextTagKeys.AI_LOCATION_IP] = location_ip if _AZURE_SDK_NAMESPACE_NAME in span.attributes: # Azure specific resources # Currently only eventhub and servicebus are supported (kind CONSUMER) data.source = trace_utils._get_azure_sdk_target_source(span.attributes) @@ -255,19 +258,21 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: if isinstance(enqueued_time, int): difference = (start_time / 1000000) - enqueued_time total += 
difference - data.measurements["timeSinceEnqueued"] = max(0, total / len(span.links)) + measurements = data.measurements or {} + measurements["timeSinceEnqueued"] = max(0, total / len(span.links)) + data.measurements = measurements elif HTTP_REQUEST_METHOD in span.attributes or SpanAttributes.HTTP_METHOD in span.attributes: # HTTP path = "" user_agent = trace_utils._get_user_agent(span.attributes) if user_agent: # TODO: Not exposed in Swagger, need to update def - envelope.tags["ai.user.userAgent"] = user_agent + tags["ai.user.userAgent"] = user_agent # url url = trace_utils._get_url_for_http_request(span.attributes) data.url = url # Http specific logic for ai.operation.name if SpanAttributes.HTTP_ROUTE in span.attributes: - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( + tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( span.attributes.get(HTTP_REQUEST_METHOD) or span.attributes.get(SpanAttributes.HTTP_METHOD), span.attributes[SpanAttributes.HTTP_ROUTE], ) @@ -277,14 +282,15 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: path = parse_url.path if not path: path = "/" - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( + tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( span.attributes.get(HTTP_REQUEST_METHOD) or span.attributes.get(SpanAttributes.HTTP_METHOD), path, ) except Exception: # pylint: disable=broad-except pass - status_code = span.attributes.get(HTTP_RESPONSE_STATUS_CODE) \ - or span.attributes.get(SpanAttributes.HTTP_STATUS_CODE) + status_code = span.attributes.get(HTTP_RESPONSE_STATUS_CODE) or span.attributes.get( + SpanAttributes.HTTP_STATUS_CODE + ) if status_code: try: status_code = int(status_code) # type: ignore @@ -311,8 +317,8 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: data.source = span.attributes.get(SpanAttributes.MESSAGING_DESTINATION, "") # Apply truncation # See 
https://github.com/MohanGsk/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond - if envelope.tags.get(ContextTagKeys.AI_OPERATION_NAME): - data.name = envelope.tags[ContextTagKeys.AI_OPERATION_NAME][:1024] + if tags.get(ContextTagKeys.AI_OPERATION_NAME): + data.name = tags[ContextTagKeys.AI_OPERATION_NAME][:1024] if data.response_code: data.response_code = data.response_code[:1024] if data.source: @@ -333,7 +339,7 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: properties={}, ) envelope.data = MonitorBase(base_data=data, base_type="RemoteDependencyData") - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = span.name + tags[ContextTagKeys.AI_OPERATION_NAME] = span.name target = trace_utils._get_target_for_dependency_from_peer(span.attributes) if span.kind is SpanKind.CLIENT: gen_ai_attributes_val = "" @@ -349,14 +355,14 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: user_agent = trace_utils._get_user_agent(span.attributes) if user_agent: # TODO: Not exposed in Swagger, need to update def - envelope.tags["ai.user.userAgent"] = user_agent + tags["ai.user.userAgent"] = user_agent url = trace_utils._get_url_for_http_dependency(span.attributes) # Http specific logic for ai.operation.name if SpanAttributes.HTTP_ROUTE in span.attributes: - envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( + tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( span.attributes.get(HTTP_REQUEST_METHOD) or span.attributes.get(SpanAttributes.HTTP_METHOD), span.attributes[SpanAttributes.HTTP_ROUTE], - ) + ) # data if url: data.data = url @@ -367,16 +373,16 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: # http specific logic for name if path: data.name = "{} {}".format( - span.attributes.get(HTTP_REQUEST_METHOD) or \ - span.attributes.get(SpanAttributes.HTTP_METHOD), + span.attributes.get(HTTP_REQUEST_METHOD) or span.attributes.get(SpanAttributes.HTTP_METHOD), path, ) - 
envelope.tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( + tags[ContextTagKeys.AI_OPERATION_NAME] = "{} {}".format( span.attributes.get(HTTP_REQUEST_METHOD) or span.attributes.get(SpanAttributes.HTTP_METHOD), path, ) - status_code = span.attributes.get(HTTP_RESPONSE_STATUS_CODE) or \ - span.attributes.get(SpanAttributes.HTTP_STATUS_CODE) + status_code = span.attributes.get(HTTP_RESPONSE_STATUS_CODE) or span.attributes.get( + SpanAttributes.HTTP_STATUS_CODE + ) if status_code: try: status_code = int(status_code) # type: ignore @@ -457,8 +463,8 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: data.type += " | {}".format(span.attributes[_AZURE_SDK_NAMESPACE_NAME]) # Apply truncation # See https://github.com/MohanGsk/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond - if envelope.tags.get(ContextTagKeys.AI_OPERATION_NAME): - data.name = envelope.tags[ContextTagKeys.AI_OPERATION_NAME][:1024] + if tags.get(ContextTagKeys.AI_OPERATION_NAME): + data.name = tags[ContextTagKeys.AI_OPERATION_NAME][:1024] elif data.name: data.name = str(data.name)[:1024] if data.result_code: @@ -474,9 +480,7 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: if _SAMPLE_RATE_KEY in span.attributes: envelope.sample_rate = span.attributes[_SAMPLE_RATE_KEY] - data.properties = _utils._filter_custom_properties( - span.attributes, lambda key, val: not _is_standard_attribute(key) - ) + properties = _utils._filter_custom_properties(span.attributes, lambda key, val: not _is_standard_attribute(key)) # Standard metrics special properties # Only add the property if span was generated from instrumentation that supports metrics collection @@ -484,7 +488,7 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: span.instrumentation_scope is not None and span.instrumentation_scope.name in _INSTRUMENTATION_SUPPORTING_METRICS_LIST ): - data.properties["_MS.ProcessedByMetricExtractors"] = "True" + 
properties["_MS.ProcessedByMetricExtractors"] = "True" if span.links: # Max length for value is 8192 @@ -496,7 +500,9 @@ def _convert_span_to_envelope(span: ReadableSpan) -> TelemetryItem: operation_id = "{:032x}".format(link.context.trace_id) span_id = "{:016x}".format(link.context.span_id) links.append({"operation_Id": operation_id, "id": span_id}) - data.properties["_MS.links"] = json.dumps(links) + properties["_MS.links"] = json.dumps(links) + data.properties = properties + envelope.tags = tags return envelope @@ -505,10 +511,11 @@ def _convert_span_events_to_envelopes(span: ReadableSpan) -> Sequence[TelemetryI envelopes = [] for event in span.events: envelope = _utils._create_telemetry_item(event.timestamp) - envelope.tags.update(_utils._populate_part_a_fields(span.resource)) - envelope.tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format(span.context.trace_id) + tags = envelope.tags or {} + tags.update(_utils._populate_part_a_fields(span.resource)) + tags[ContextTagKeys.AI_OPERATION_ID] = "{:032x}".format(span.context.trace_id) if span.context and span.context.span_id: - envelope.tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format(span.context.span_id) + tags[ContextTagKeys.AI_OPERATION_PARENT_ID] = "{:016x}".format(span.context.span_id) # sampleRate if span.attributes and _SAMPLE_RATE_KEY in span.attributes: @@ -547,7 +554,7 @@ def _convert_span_events_to_envelopes(span: ReadableSpan) -> Sequence[TelemetryI properties=properties, ) envelope.data = MonitorBase(base_data=data, base_type="MessageData") - + envelope.tags = tags envelopes.append(envelope) return envelopes @@ -579,8 +586,7 @@ def _is_standard_attribute(key: str) -> bool: for prefix in _STANDARD_OPENTELEMETRY_ATTRIBUTE_PREFIXES: if key.startswith(prefix): return True - return key in _STANDARD_AZURE_MONITOR_ATTRIBUTES or \ - key in _STANDARD_OPENTELEMETRY_HTTP_ATTRIBUTES + return key in _STANDARD_AZURE_MONITOR_ATTRIBUTES or key in _STANDARD_OPENTELEMETRY_HTTP_ATTRIBUTES def 
_get_trace_export_result(result: ExportResult) -> SpanExportResult: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_rate_limited_sampling.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_rate_limited_sampling.py index 71f7db227e98..184e06ab5b89 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_rate_limited_sampling.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_rate_limited_sampling.py @@ -24,12 +24,14 @@ parent_context_sampling, ) + class _State: def __init__(self, effective_window_count: float, effective_window_nanoseconds: float, last_nano_time: int): self.effective_window_count = effective_window_count self.effective_window_nanoseconds = effective_window_nanoseconds self.last_nano_time = last_nano_time + class RateLimitedSamplingPercentage: def __init__(self, target_spans_per_second_limit: float, round_to_nearest: bool = True): if target_spans_per_second_limit < 0.0: @@ -46,9 +48,7 @@ def __init__(self, target_spans_per_second_limit: float, round_to_nearest: bool def _update_state(self, old_state: _State, current_nano_time: int) -> _State: if current_nano_time <= old_state.last_nano_time: return _State( - old_state.effective_window_count + 1, - old_state.effective_window_nanoseconds, - old_state.last_nano_time + old_state.effective_window_count + 1, old_state.effective_window_nanoseconds, old_state.last_nano_time ) nano_time_delta = current_nano_time - old_state.last_nano_time decay_factor = math.exp(-nano_time_delta * self._inverse_adaptation_time_nanoseconds) @@ -70,9 +70,8 @@ def get(self) -> float: return 100.0 sampling_probability = ( - (current_state.effective_window_nanoseconds * self._target_spans_per_nanosecond_limit) / - current_state.effective_window_count - ) + current_state.effective_window_nanoseconds * 
self._target_spans_per_nanosecond_limit + ) / current_state.effective_window_count sampling_percentage = 100 * min(sampling_probability, 1.0) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_sampling.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_sampling.py index 293751ddd520..9f676bf50fa9 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_sampling.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_sampling.py @@ -18,7 +18,6 @@ from azure.monitor.opentelemetry.exporter._constants import _SAMPLE_RATE_KEY - # Sampler is responsible for the following: # Implements same trace id hashing algorithm so that traces are sampled the same across multiple nodes (via AI SDKS) # Adds item count to span attribute if span is sampled (needed for ingestion service) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_utils.py index 8a7a57b8f2d5..fccac2f34f77 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/export/trace/_utils.py @@ -95,8 +95,7 @@ def _get_azure_sdk_target_source(attributes: Attributes) -> Optional[str]: def _get_http_scheme(attributes: Attributes) -> Optional[str]: if attributes: - scheme = attributes.get(url_attributes.URL_SCHEME) or \ - attributes.get(SpanAttributes.HTTP_SCHEME) + scheme = attributes.get(url_attributes.URL_SCHEME) or attributes.get(SpanAttributes.HTTP_SCHEME) if scheme: return str(scheme) return None @@ -160,8 +159,9 @@ def _get_target_for_dependency_from_peer(attributes: 
Attributes) -> Optional[str port = attributes[SpanAttributes.NET_PEER_PORT] # TODO: check default port for rpc # This logic assumes default ports never conflict across dependency types - if port != _get_default_port_http(attributes) and \ - port != _get_default_port_db(str(attributes.get(SpanAttributes.DB_SYSTEM))): + if port != _get_default_port_http(attributes) and port != _get_default_port_db( + str(attributes.get(SpanAttributes.DB_SYSTEM)) + ): target = "{}:{}".format(target, port) return target @@ -264,17 +264,19 @@ def _get_target_for_rpc_dependency(target: Optional[str], attributes: Attributes # Request + @no_type_check def _get_location_ip(attributes: Attributes) -> Optional[str]: - return attributes.get(client_attributes.CLIENT_ADDRESS) or \ - attributes.get(SpanAttributes.HTTP_CLIENT_IP) or \ - attributes.get(SpanAttributes.NET_PEER_IP) # We assume non-http spans don't have http related attributes + return ( + attributes.get(client_attributes.CLIENT_ADDRESS) + or attributes.get(SpanAttributes.HTTP_CLIENT_IP) + or attributes.get(SpanAttributes.NET_PEER_IP) + ) # We assume non-http spans don't have http related attributes @no_type_check def _get_user_agent(attributes: Attributes) -> Optional[str]: - return attributes.get(user_agent_attributes.USER_AGENT_ORIGINAL) or \ - attributes.get(SpanAttributes.HTTP_USER_AGENT) + return attributes.get(user_agent_attributes.USER_AGENT_ORIGINAL) or attributes.get(SpanAttributes.HTTP_USER_AGENT) @no_type_check @@ -293,10 +295,7 @@ def _get_url_for_http_request(attributes: Attributes) -> Optional[str]: if url_attributes.URL_PATH in attributes: http_target = attributes.get(url_attributes.URL_PATH, "") if http_target and url_attributes.URL_QUERY in attributes: - http_target = "{}?{}".format( - http_target, - attributes.get(url_attributes.URL_QUERY, "") - ) + http_target = "{}?{}".format(http_target, attributes.get(url_attributes.URL_QUERY, "")) elif SpanAttributes.HTTP_TARGET in attributes: http_target = 
attributes.get(SpanAttributes.HTTP_TARGET) if scheme and http_target: @@ -305,10 +304,7 @@ def _get_url_for_http_request(attributes: Attributes) -> Optional[str]: if server_attributes.SERVER_ADDRESS in attributes: http_host = attributes.get(server_attributes.SERVER_ADDRESS, "") if http_host and server_attributes.SERVER_PORT in attributes: - http_host = "{}:{}".format( - http_host, - attributes.get(server_attributes.SERVER_PORT, "") - ) + http_host = "{}:{}".format(http_host, attributes.get(server_attributes.SERVER_PORT, "")) elif SpanAttributes.HTTP_HOST in attributes: http_host = attributes.get(SpanAttributes.HTTP_HOST, "") if http_host: @@ -337,6 +333,7 @@ def _get_url_for_http_request(attributes: Attributes) -> Optional[str]: ) return url + def _get_DJB2_sample_score(trace_id_hex: str) -> float: # This algorithm uses 32bit integers hash_value = _SAMPLING_HASH @@ -353,6 +350,7 @@ def _get_DJB2_sample_score(trace_id_hex: str) -> float: # divide by _INT32_MAX for value between 0 and 1 for sampling score return float(hash_value) / _INT32_MAX + def _round_down_to_nearest(sampling_percentage: float) -> float: if sampling_percentage == 0: return 0 @@ -365,9 +363,9 @@ def _round_down_to_nearest(sampling_percentage: float) -> float: return 0.0 return 100.0 / math.ceil(item_count) + def parent_context_sampling( - parent_context: Optional[Context], - attributes: Attributes = None + parent_context: Optional[Context], attributes: Attributes = None ) -> Optional["SamplingResult"]: if parent_context is not None: @@ -385,7 +383,7 @@ def parent_context_sampling( _get_parent_trace_state(parent_context), ) - parent_attributes = getattr(parent_span, 'attributes', {}) + parent_attributes = getattr(parent_span, "attributes", {}) parent_sample_rate = parent_attributes.get(_SAMPLE_RATE_KEY) if parent_sample_rate is not None: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/__init__.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/__init__.py index d95f5d59ea09..87f11c90ea0e 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/__init__.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/__init__.py @@ -18,8 +18,8 @@ ) __all__ = [ - 'StatsbeatConfig', - 'StatsbeatManager', - 'collect_statsbeat_metrics', - 'shutdown_statsbeat_metrics', + "StatsbeatConfig", + "StatsbeatManager", + "collect_statsbeat_metrics", + "shutdown_statsbeat_metrics", ] diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_manager.py index d910d103e856..d8f8535c0e6e 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_manager.py @@ -28,14 +28,16 @@ class StatsbeatConfig: """Configuration class for Statsbeat metrics collection.""" - def __init__(self, - endpoint: str, - region: str, - instrumentation_key: str, - disable_offline_storage: bool = False, - credential: Optional[Any] = None, - distro_version: Optional[str] = None, - connection_string: Optional[str] = None) -> None: + def __init__( + self, + endpoint: str, + region: str, + instrumentation_key: str, + disable_offline_storage: bool = False, + credential: Optional[Any] = None, + distro_version: Optional[str] = None, + connection_string: Optional[str] = None, + ) -> None: # Customer specific information self.endpoint = endpoint self.region = region @@ -60,16 +62,19 @@ def __init__(self, self.connection_string = _get_stats_connection_string(endpoint) @classmethod - def from_exporter(cls, exporter: Any) -> Optional['StatsbeatConfig']: 
+ def from_exporter(cls, exporter: Any) -> Optional["StatsbeatConfig"]: # Create configuration from an exporter instance # Validate required fields from exporter - if not hasattr(exporter, '_instrumentation_key') or not exporter._instrumentation_key: # pylint: disable=protected-access + if ( + not hasattr(exporter, "_instrumentation_key") + or not exporter._instrumentation_key # pylint: disable=protected-access + ): logger.warning("Exporter is missing a valid instrumentation key.") return None - if not hasattr(exporter, '_endpoint') or not exporter._endpoint: # pylint: disable=protected-access + if not hasattr(exporter, "_endpoint") or not exporter._endpoint: # pylint: disable=protected-access logger.warning("Exporter is missing a valid endpoint.") return None - if not hasattr(exporter, '_region') or not exporter._region: # pylint: disable=protected-access + if not hasattr(exporter, "_region") or not exporter._region: # pylint: disable=protected-access logger.warning("Exporter is missing a valid region.") return None @@ -83,7 +88,7 @@ def from_exporter(cls, exporter: Any) -> Optional['StatsbeatConfig']: ) @classmethod - def from_config(cls, base_config: 'StatsbeatConfig', config_dict: Dict[str, str]) -> Optional['StatsbeatConfig']: + def from_config(cls, base_config: "StatsbeatConfig", config_dict: Dict[str, str]) -> Optional["StatsbeatConfig"]: """Update configuration from a dictionary. Used in conjunction with OneSettings control plane. 
Creates a new StatsbeatConfig instance with the same base configuration but updated @@ -114,17 +119,18 @@ def from_config(cls, base_config: 'StatsbeatConfig', config_dict: Dict[str, str] # TODO: Add support for disable_offline_storage from config_dict once supported in control plane disable_offline_storage = config_dict.get("disable_offline_storage") - disable_offline_storage_config = isinstance(disable_offline_storage, str) \ - and disable_offline_storage.lower() == "true" + disable_offline_storage_config = ( + isinstance(disable_offline_storage, str) and disable_offline_storage.lower() == "true" + ) return cls( endpoint=base_config.endpoint, region=base_config.region, instrumentation_key=base_config.instrumentation_key, - disable_offline_storage=disable_offline_storage_config, # TODO: Use config value once supported + disable_offline_storage=disable_offline_storage_config, # TODO: Use config value once supported credential=base_config.credential, distro_version=base_config.distro_version, - connection_string=connection_string + connection_string=connection_string, ) def __eq__(self, other: object) -> bool: @@ -132,8 +138,8 @@ def __eq__(self, other: object) -> bool: if not isinstance(other, StatsbeatConfig): return False return ( - str(self.connection_string) == str(other.connection_string) and - self.disable_offline_storage == other.disable_offline_storage + str(self.connection_string) == str(other.connection_string) + and self.disable_offline_storage == other.disable_offline_storage ) def __hash__(self) -> int: @@ -210,7 +216,7 @@ def _do_initialize(self, config: StatsbeatConfig) -> bool: # Create metric reader reader = PeriodicExportingMetricReader( statsbeat_exporter, - export_interval_millis=_get_stats_short_export_interval() * 1000, # 15m by default + export_interval_millis=_get_stats_short_export_interval() * 1000, # 15m by default ) # Create meter provider @@ -333,7 +339,7 @@ def get_current_config(self) -> Optional[StatsbeatConfig]: 
disable_offline_storage=self._config.disable_offline_storage, credential=self._config.credential, distro_version=self._config.distro_version, - connection_string=self._config.connection_string + connection_string=self._config.connection_string, ) def is_initialized(self) -> bool: diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_state.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_state.py index 6a1f43f5057a..259488b8dff2 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_state.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_state.py @@ -4,9 +4,7 @@ import threading from typing import TYPE_CHECKING, Dict, Union -from azure.monitor.opentelemetry.exporter._constants import ( - _APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL -) +from azure.monitor.opentelemetry.exporter._constants import _APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL if TYPE_CHECKING: from azure.monitor.opentelemetry.exporter.statsbeat._manager import StatsbeatManager @@ -28,6 +26,7 @@ # Global singleton instance for easy access throughout the codebase _statsbeat_manager = None + def get_statsbeat_manager() -> "StatsbeatManager": """Get the global Statsbeat Manager singleton instance. 
@@ -39,9 +38,11 @@ def get_statsbeat_manager() -> "StatsbeatManager": global _statsbeat_manager # pylint: disable=global-statement if _statsbeat_manager is None: from azure.monitor.opentelemetry.exporter.statsbeat._manager import StatsbeatManager + _statsbeat_manager = StatsbeatManager() return _statsbeat_manager + def is_statsbeat_enabled(): disabled = os.environ.get(_APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL) return disabled is None or disabled.lower() != "true" @@ -96,9 +97,11 @@ def set_statsbeat_shutdown(shutdown: bool): with _STATSBEAT_STATE_LOCK: _STATSBEAT_STATE["SHUTDOWN"] = shutdown -def get_statsbeat_customer_sdkstats_feature_set(): # pylint: disable=name-too-long + +def get_statsbeat_customer_sdkstats_feature_set(): # pylint: disable=name-too-long return _STATSBEAT_STATE["CUSTOMER_SDKSTATS_FEATURE_SET"] -def set_statsbeat_customer_sdkstats_feature_set(): # pylint: disable=name-too-long + +def set_statsbeat_customer_sdkstats_feature_set(): # pylint: disable=name-too-long with _STATSBEAT_STATE_LOCK: _STATSBEAT_STATE["CUSTOMER_SDKSTATS_FEATURE_SET"] = True diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat.py index 1f2c7623f0cd..4aa8b2eec9a4 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat.py @@ -17,6 +17,7 @@ logger = logging.getLogger(__name__) + # pyright: ignore def collect_statsbeat_metrics(exporter: "BaseExporter") -> None: # pyright: ignore config = StatsbeatConfig.from_exporter(exporter) @@ -44,10 +45,7 @@ def get_statsbeat_configuration_callback(settings: Dict[str, str]): manager = get_statsbeat_manager() # Check if SDK stats should be enabled based on configuration - sdk_stats_enabled 
= evaluate_feature( - _ONE_SETTINGS_FEATURE_SDK_STATS, - settings - ) + sdk_stats_enabled = evaluate_feature(_ONE_SETTINGS_FEATURE_SDK_STATS, settings) if sdk_stats_enabled: current_config = manager.get_current_config() # Since config is preserved between shutdowns, diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat_metrics.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat_metrics.py index 13473abb7543..13ee2764de90 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat_metrics.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_statsbeat_metrics.py @@ -39,12 +39,15 @@ ) from azure.monitor.opentelemetry.exporter import _utils + # Use a function to get VERSION lazily def _get_version() -> str: # Get VERSION using delayed import to avoid circular import. 
from azure.monitor.opentelemetry.exporter import VERSION + return VERSION + # cSpell:disable _AIMS_URI = "http://169.254.169.254/metadata/instance/compute" diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_utils.py index 0a1be8f216a8..62378e5a58df 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/_utils.py @@ -26,6 +26,7 @@ _REQUESTS_MAP_LOCK, ) + def _get_stats_connection_string(endpoint: str) -> str: cs_env = os.environ.get(_APPLICATIONINSIGHTS_STATS_CONNECTION_STRING_ENV_NAME) if cs_env: @@ -80,8 +81,10 @@ def _update_requests_map(type_name, value): _REQUESTS_MAP[type_name] = {} _REQUESTS_MAP[type_name][value] = prev + 1 + ## OneSettings Config + # pylint: disable=too-many-return-statements def _get_connection_string_for_region_from_config(target_region: str, settings: Dict[str, str]) -> Optional[str]: """Get the appropriate stats connection string for the given region. 
@@ -135,8 +138,9 @@ def _get_connection_string_for_region_from_config(target_region: str, settings: boundary_regions = json.loads(boundary_regions) # Check if the region is in this boundary's regions - if isinstance(boundary_regions, list) and \ - any(target_region.lower() == r.lower() for r in boundary_regions): + if isinstance(boundary_regions, list) and any( + target_region.lower() == r.lower() for r in boundary_regions + ): # Found the boundary, get the corresponding connection string connection_string_key = f"{boundary}_STATS_CONNECTION_STRING" connection_string = settings.get(connection_string_key) @@ -144,8 +148,7 @@ def _get_connection_string_for_region_from_config(target_region: str, settings: if connection_string: return connection_string - logger.warning("Connection string key '%s' not found in configuration", - connection_string_key) + logger.warning("Connection string key '%s' not found in configuration", connection_string_key) # Region not found in any specific boundary, try DEFAULT if not default_connection_string: @@ -156,6 +159,5 @@ def _get_connection_string_for_region_from_config(target_region: str, settings: logger.warning("Error parsing configuration for region '%s': %s", target_region, str(ex)) return None except Exception as ex: # pylint: disable=broad-exception-caught - logger.warning("Unexpected error getting stats connection string for region '%s': %s", - target_region, str(ex)) + logger.warning("Unexpected error getting stats connection string for region '%s': %s", target_region, str(ex)) return None diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/__init__.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/__init__.py index 85b701b2f9f2..98a5624f4ae6 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/__init__.py +++ 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/__init__.py @@ -13,6 +13,6 @@ __all__ = [ "get_customer_stats_manager", - "collect_customer_sdkstats", + "collect_customer_sdkstats", "shutdown_customer_sdkstats_metrics", ] diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_customer_sdkstats.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_customer_sdkstats.py index 9c3d2db4eb35..36cf51865df9 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_customer_sdkstats.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_customer_sdkstats.py @@ -23,6 +23,7 @@ def collect_customer_sdkstats(exporter: "BaseExporter") -> None: # type: ignore customer_stats.initialize(connection_string=exporter._connection_string) # type: ignore set_statsbeat_customer_sdkstats_feature_set() + def shutdown_customer_sdkstats_metrics() -> None: # Shutdown customer SDKStats metrics collection. 
customer_stats = get_customer_stats_manager() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_manager.py index 171fbc1ae152..10b37fa207bb 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_manager.py @@ -37,10 +37,11 @@ class CustomerSdkStatsStatus(Enum): """Status enumeration for Customer SDK Stats Manager.""" - DISABLED = "disabled" # Feature is disabled via environment variable - UNINITIALIZED = "uninitialized" # Manager created but not initialized - ACTIVE = "active" # Fully initialized and operational - SHUTDOWN = "shutdown" # Has been shut down + + DISABLED = "disabled" # Feature is disabled via environment variable + UNINITIALIZED = "uninitialized" # Manager created but not initialized + ACTIVE = "active" # Fully initialized and operational + SHUTDOWN = "shutdown" # Has been shut down class _CustomerSdkStatsTelemetryCounters: @@ -50,11 +51,11 @@ def __init__(self): self.total_item_retry_count: Dict[str, Dict[RetryCodeType, Dict[str, int]]] = {} # type: ignore -class CustomerSdkStatsManager(metaclass=Singleton): # pylint: disable=too-many-instance-attributes +class CustomerSdkStatsManager(metaclass=Singleton): # pylint: disable=too-many-instance-attributes def __init__(self): # Initialize instance attributes that remain constant. Called only once due to Singleton metaclass. 
self._initialization_lock = threading.Lock() # For initialization/shutdown operations - self._counters_lock = threading.Lock() # For counter operations and callbacks + self._counters_lock = threading.Lock() # For counter operations and callbacks # Determine initial status based on environment if is_customer_sdkstats_enabled(): @@ -74,6 +75,7 @@ def __init__(self): # Initialize customer properties if enabled if self._status != CustomerSdkStatsStatus.DISABLED: from azure.monitor.opentelemetry.exporter import VERSION + # Pre-build base attributes for all metrics to avoid recreation on each callback self._base_attributes: Optional[Dict[str, Any]] = { # type: ignore "language": self._language, @@ -163,7 +165,7 @@ def _do_initialize(self, connection_string: str) -> bool: ) metric_reader_options = { "exporter": self._customer_sdkstats_exporter, - "export_interval_millis": get_customer_sdkstats_export_interval() * 1000 # Default 15m + "export_interval_millis": get_customer_sdkstats_export_interval() * 1000, # Default 15m } self._customer_sdkstats_metric_reader = PeriodicExportingMetricReader(**metric_reader_options) self._customer_sdkstats_meter_provider = MeterProvider( @@ -174,17 +176,17 @@ def _do_initialize(self, connection_string: str) -> bool: self._success_gauge = self._customer_sdkstats_meter.create_observable_gauge( name=CustomerSdkStatsMetricName.ITEM_SUCCESS_COUNT.value, description="Tracks successful telemetry items sent to Azure Monitor", - callbacks=[self._item_success_callback] + callbacks=[self._item_success_callback], ) self._dropped_gauge = self._customer_sdkstats_meter.create_observable_gauge( name=CustomerSdkStatsMetricName.ITEM_DROP_COUNT.value, description="Tracks dropped telemetry items sent to Azure Monitor", - callbacks=[self._item_drop_callback] + callbacks=[self._item_drop_callback], ) self._retry_gauge = self._customer_sdkstats_meter.create_observable_gauge( name=CustomerSdkStatsMetricName.ITEM_RETRY_COUNT.value, description="Tracks retry 
attempts for telemetry items sent to Azure Monitor", - callbacks=[self._item_retry_callback] + callbacks=[self._item_retry_callback], ) # Set status to active after successful initialization @@ -245,8 +247,12 @@ def count_successful_items(self, count: int, telemetry_type: str) -> None: self._counters.total_item_success_count[telemetry_type] = count def count_dropped_items( - self, count: int, telemetry_type: str, drop_code: DropCodeType, telemetry_success: Union[bool, None], - exception_message: Optional[str] = None + self, + count: int, + telemetry_type: str, + drop_code: DropCodeType, + telemetry_success: Union[bool, None], + exception_message: Optional[str] = None, ) -> None: if not self.is_initialized or count <= 0 or telemetry_success is None: return @@ -271,8 +277,7 @@ def count_dropped_items( success_map[success_key] = current_count + count def count_retry_items( - self, count: int, telemetry_type: str, retry_code: RetryCodeType, - exception_message: Optional[str] = None + self, count: int, telemetry_type: str, retry_code: RetryCodeType, exception_message: Optional[str] = None ) -> None: if not self.is_initialized or count <= 0: return @@ -291,7 +296,9 @@ def count_retry_items( current_count = reason_map.get(reason, 0) reason_map[reason] = current_count + count - def _item_success_callback(self, options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument + def _item_success_callback( + self, options: CallbackOptions # pylint: disable=unused-argument + ) -> Iterable[Observation]: if not self.is_initialized or not self._base_attributes: return [] @@ -310,7 +317,7 @@ def _item_success_callback(self, options: CallbackOptions) -> Iterable[Observati return observations - def _item_drop_callback(self, options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument + def _item_drop_callback(self, options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument if not self.is_initialized or not 
self._base_attributes: return [] observations: List[Observation] = [] @@ -336,7 +343,9 @@ def _item_drop_callback(self, options: CallbackOptions) -> Iterable[Observation] return observations - def _item_retry_callback(self, options: CallbackOptions) -> Iterable[Observation]: # pylint: disable=unused-argument + def _item_retry_callback( + self, options: CallbackOptions # pylint: disable=unused-argument + ) -> Iterable[Observation]: if not self.is_initialized or not self._base_attributes: return [] observations: List[Observation] = [] @@ -369,7 +378,7 @@ def _get_drop_reason(self, drop_code: DropCodeType, exception_message: Optional[ DropCode.CLIENT_READONLY: "Client readonly", DropCode.CLIENT_STORAGE_DISABLED: "Client local storage disabled", DropCode.CLIENT_PERSISTENCE_CAPACITY: "Client persistence capacity", - DropCode.UNKNOWN: "Unknown reason" + DropCode.UNKNOWN: "Unknown reason", } return drop_code_reasons.get(drop_code, DropCode.UNKNOWN) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_state.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_state.py index f35e56c54aa7..fbc6a9fad019 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_state.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_state.py @@ -9,6 +9,7 @@ # Global singleton instance for easy access throughout the codebase _customer_stats_manager = None + def get_customer_stats_manager() -> "CustomerSdkStatsManager": # type: ignore # Get the global CustomerSdkStatsManager singleton instance. 
@@ -18,15 +19,14 @@ def get_customer_stats_manager() -> "CustomerSdkStatsManager": # type: ignore global _customer_stats_manager # pylint: disable=global-statement if _customer_stats_manager is None: from ._manager import CustomerSdkStatsManager + _customer_stats_manager = CustomerSdkStatsManager() return _customer_stats_manager + # TODO: Move to a storage manager -_LOCAL_STORAGE_SETUP_STATE = { - "READONLY": False, - "EXCEPTION_OCCURRED": "" -} +_LOCAL_STORAGE_SETUP_STATE = {"READONLY": False, "EXCEPTION_OCCURRED": ""} _LOCAL_STORAGE_SETUP_STATE_LOCK = threading.Lock() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_utils.py index 0030e7726098..5e676e3e70b6 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/azure/monitor/opentelemetry/exporter/statsbeat/customer/_utils.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. import os from typing import Optional, List, Tuple, Union + # mypy: disable-error-code="import-untyped" from requests import ReadTimeout, Timeout from azure.core.exceptions import ServiceRequestTimeoutError @@ -19,7 +20,7 @@ _exception_categories, ) from azure.monitor.opentelemetry.exporter._utils import _get_telemetry_type -from azure.monitor.opentelemetry.exporter._generated.models import TelemetryItem +from azure.monitor.opentelemetry.exporter._generated.exporter.models import TelemetryItem from ._state import ( get_local_storage_setup_state_exception, get_customer_stats_manager, @@ -28,7 +29,7 @@ def get_customer_sdkstats_export_interval() -> int: """Get the export interval for customer SDK stats from environment or default. 
- + :return: Export interval in seconds :rtype: int """ @@ -43,7 +44,7 @@ def get_customer_sdkstats_export_interval() -> int: def is_customer_sdkstats_enabled() -> bool: """Check if customer SDK stats collection is enabled via environment variable. - + :return: True if enabled, False otherwise :rtype: bool """ @@ -52,7 +53,7 @@ def is_customer_sdkstats_enabled() -> bool: def categorize_status_code(status_code: int) -> str: """Categorize HTTP status codes into human-readable messages. - + :param status_code: HTTP status code :type status_code: int :return: Human-readable status message @@ -81,9 +82,11 @@ def categorize_status_code(status_code: int) -> str: return f"status_{status_code}" -def _determine_client_retry_code(error) -> Tuple[RetryCodeType, Optional[str]]: # pylint: disable=docstring-missing-type +def _determine_client_retry_code( # pylint: disable=docstring-missing-type + error, +) -> Tuple[RetryCodeType, Optional[str]]: """Determine the retry code and message for a given error. 
- + :param error: The error that occurred :return: Tuple of retry code and optional message :rtype: Tuple[RetryCodeType, Optional[str]] @@ -98,17 +101,17 @@ def _determine_client_retry_code(error) -> Tuple[RetryCodeType, Optional[str]]: ConnectionError, OSError, ) - if hasattr(error, 'status_code') and error.status_code in [401, 403, 408, 429, 500, 502, 503, 504]: + if hasattr(error, "status_code") and error.status_code in [401, 403, 408, 429, 500, 502, 503, 504]: # For specific status codes, preserve the custom message if available - error_message = getattr(error, 'message', None) if hasattr(error, 'message') else None + error_message = getattr(error, "message", None) if hasattr(error, "message") else None return (error.status_code, error_message or _UNKNOWN) if isinstance(error, timeout_exception_types): return (RetryCode.CLIENT_TIMEOUT, _exception_categories.TIMEOUT_EXCEPTION.value) - if hasattr(error, 'message'): - error_message = getattr(error, 'message', None) if hasattr(error, 'message') else None - if error_message is not None and ('timeout' in error_message.lower() or 'timed out' in error_message.lower()): + if hasattr(error, "message"): + error_message = getattr(error, "message", None) if hasattr(error, "message") else None + if error_message is not None and ("timeout" in error_message.lower() or "timed out" in error_message.lower()): return (RetryCode.CLIENT_TIMEOUT, _exception_categories.TIMEOUT_EXCEPTION.value) if isinstance(error, network_exception_types): @@ -119,7 +122,7 @@ def _determine_client_retry_code(error) -> Tuple[RetryCodeType, Optional[str]]: def _get_telemetry_success_flag(envelope: TelemetryItem) -> Union[bool, None]: """Extract the success flag from a telemetry envelope. 
- + :param envelope: The telemetry envelope :type envelope: TelemetryItem :return: Success flag if available, None otherwise @@ -145,7 +148,7 @@ def _get_telemetry_success_flag(envelope: TelemetryItem) -> Union[bool, None]: def track_successful_items(envelopes: List[TelemetryItem]): """Track successful telemetry items in customer SDK stats. - + :param envelopes: List of telemetry envelopes that were successfully sent :type envelopes: List[TelemetryItem] """ @@ -153,17 +156,10 @@ def track_successful_items(envelopes: List[TelemetryItem]): for envelope in envelopes: telemetry_type = _get_telemetry_type(envelope) - customer_stats.count_successful_items( - 1, - telemetry_type - ) + customer_stats.count_successful_items(1, telemetry_type) -def track_dropped_items( - envelopes: List[TelemetryItem], - drop_code: DropCodeType, - error_message: Optional[str] = None - ): +def track_dropped_items(envelopes: List[TelemetryItem], drop_code: DropCodeType, error_message: Optional[str] = None): customer_stats = get_customer_stats_manager() if error_message is None: @@ -173,7 +169,7 @@ def track_dropped_items( 1, telemetry_type, drop_code, - _get_telemetry_success_flag(envelope) if telemetry_type in (_REQUEST, _DEPENDENCY) else True + _get_telemetry_success_flag(envelope) if telemetry_type in (_REQUEST, _DEPENDENCY) else True, ) else: for envelope in envelopes: @@ -183,7 +179,7 @@ def track_dropped_items( telemetry_type, drop_code, _get_telemetry_success_flag(envelope) if telemetry_type in (_REQUEST, _DEPENDENCY) else True, - exception_message=error_message + exception_message=error_message, ) @@ -196,25 +192,11 @@ def track_retry_items(envelopes: List[TelemetryItem], error) -> None: if isinstance(retry_code, int): # For status codes, include the message if available if message: - customer_stats.count_retry_items( - 1, - telemetry_type, - retry_code, - str(message) - ) + customer_stats.count_retry_items(1, telemetry_type, retry_code, str(message)) else: - 
customer_stats.count_retry_items( - 1, - telemetry_type, - retry_code - ) + customer_stats.count_retry_items(1, telemetry_type, retry_code) else: - customer_stats.count_retry_items( - 1, - telemetry_type, - retry_code, - str(message) - ) + customer_stats.count_retry_items(1, telemetry_type, retry_code, str(message)) def track_dropped_items_from_storage(result_from_storage_put, envelopes): @@ -232,10 +214,14 @@ def track_dropped_items_from_storage(result_from_storage_put, envelopes): track_dropped_items(envelopes, DropCode.CLIENT_PERSISTENCE_CAPACITY) elif get_local_storage_setup_state_exception() != "": # For exceptions caught in _check_and_set_folder_permissions during storage setup - track_dropped_items(envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value) # pylint: disable=line-too-long + track_dropped_items( + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value + ) # pylint: disable=line-too-long elif isinstance(result_from_storage_put, str): # For any exceptions occurred in put method of either LocalFileStorage or LocalFileBlob, track dropped item with reason # pylint: disable=line-too-long - track_dropped_items(envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value) # pylint: disable=line-too-long + track_dropped_items( + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value + ) # pylint: disable=line-too-long else: # LocalFileBlob.put returns StorageExportResult.LOCAL_FILE_BLOB_SUCCESS here. Don't need to track anything in this case. 
# pylint: disable=line-too-long pass diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/samples/logs/sample_custom_event.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/samples/logs/sample_custom_event.py index 0ad297812b44..753fccf7b1c1 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/samples/logs/sample_custom_event.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/samples/logs/sample_custom_event.py @@ -32,10 +32,13 @@ logger.setLevel(logging.INFO) # You can send `customEvent`` telemetry using a special `microsoft` attribute key through logging -# The name of the `customEvent` will correspond to the value of the attribute` +# The name of the `customEvent` will correspond to the value of the attribute` logger.info("Hello World!", extra={"microsoft.custom_event.name": "test-event-name", "additional_attrs": "val1"}) # You can also populate fields like client_Ip with attribute `client.address` -logger.info("This entry will have a custom client_Ip", extra={"microsoft.custom_event.name": "test_event", "client.address": "192.168.1.1"}) +logger.info( + "This entry will have a custom client_Ip", + extra={"microsoft.custom_event.name": "test_event", "client.address": "192.168.1.1"}, +) input() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_manager.py index f77e2c692878..1b4695611972 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_manager.py @@ -26,7 +26,7 @@ class TestConfigurationState(unittest.TestCase): def test_default_values(self): """Test that _ConfigurationState has correct default values.""" state = _ConfigurationState() - + self.assertEqual(state.etag, "") self.assertEqual(state.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) 
self.assertEqual(state.version_cache, -1) @@ -36,7 +36,7 @@ def test_with_updates_single_field(self): """Test updating a single field creates new state object.""" original_state = _ConfigurationState() updated_state = original_state.with_updates(etag="new-etag") - + # Original state unchanged self.assertEqual(original_state.etag, "") # New state has updated value @@ -49,18 +49,15 @@ def test_with_updates_multiple_fields(self): """Test updating multiple fields creates new state object.""" original_state = _ConfigurationState() updated_state = original_state.with_updates( - etag="test-etag", - refresh_interval=60, - version_cache=5, - settings_cache={"key": "value"} + etag="test-etag", refresh_interval=60, version_cache=5, settings_cache={"key": "value"} ) - + # Original state unchanged self.assertEqual(original_state.etag, "") self.assertEqual(original_state.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(original_state.version_cache, -1) self.assertEqual(original_state.settings_cache, {}) - + # New state has all updated values self.assertEqual(updated_state.etag, "test-etag") self.assertEqual(updated_state.refresh_interval, 60) @@ -71,9 +68,9 @@ def test_settings_cache_isolation(self): """Test that settings_cache is properly isolated between state objects.""" original_state = _ConfigurationState() original_state.settings_cache["original"] = "value" - + updated_state = original_state.with_updates(settings_cache={"new": "value"}) - + # Original and updated states should be isolated self.assertEqual(original_state.settings_cache, {"original": "value"}) self.assertEqual(updated_state.settings_cache, {"new": "value"}) @@ -86,10 +83,11 @@ def setUp(self): """Reset singleton state before each test.""" # Clear any existing singleton instance from azure.monitor.opentelemetry.exporter._utils import Singleton + if _ConfigurationManager in Singleton._instances: # Shutdown existing instance first existing_instance = 
Singleton._instances[_ConfigurationManager] - if hasattr(existing_instance, '_configuration_worker') and existing_instance._configuration_worker: + if hasattr(existing_instance, "_configuration_worker") and existing_instance._configuration_worker: existing_instance.shutdown() if _ConfigurationManager in Singleton._instances: del Singleton._instances[_ConfigurationManager] @@ -97,47 +95,48 @@ def setUp(self): def tearDown(self): """Clean up after each test.""" from azure.monitor.opentelemetry.exporter._utils import Singleton + if _ConfigurationManager in Singleton._instances: # Shutdown the instance instance = Singleton._instances[_ConfigurationManager] - if hasattr(instance, '_configuration_worker') and instance._configuration_worker: + if hasattr(instance, "_configuration_worker") and instance._configuration_worker: instance.shutdown() if _ConfigurationManager in Singleton._instances: del Singleton._instances[_ConfigurationManager] - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_singleton_pattern(self, mock_worker_class): """Test that ConfigurationManager follows singleton pattern.""" # Create first instance manager1 = _ConfigurationManager() - + # Create second instance manager2 = _ConfigurationManager() manager1.initialize() manager2.initialize() - + # Should be the same instance self.assertIs(manager1, manager2) - + # Worker should only be initialized once mock_worker_class.assert_called_once() - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_worker_initialization(self, mock_worker_class): """Test that ConfigurationWorker is initialized properly.""" mock_worker_instance = Mock() mock_worker_class.return_value = mock_worker_instance - + manager = _ConfigurationManager() 
manager.initialize() - + # Verify worker was created with manager and default refresh interval mock_worker_class.assert_called_once_with(manager, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(manager._configuration_worker, mock_worker_instance) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_get_configuration_basic_success(self, mock_worker_class, mock_request): """Test basic successful configuration retrieval without CONFIG fetch.""" # Setup - Use version -1 to match initial state, no CONFIG fetch @@ -146,52 +145,44 @@ def test_get_configuration_basic_success(self, mock_worker_class, mock_request): refresh_interval=1800, settings={"key": "value"}, version=-1, # Same as initial version, no CONFIG fetch - status_code=200 + status_code=200, ) mock_request.return_value = mock_response - + manager = _ConfigurationManager() manager.initialize() - + # Execute result = manager.get_configuration_and_refresh_interval({"param": "value"}) - + # Verify return value self.assertEqual(result, 1800) - + # Verify only one request was made (to CHANGE endpoint only) mock_request.assert_called_once() call_args = mock_request.call_args self.assertEqual(call_args[0][0], _ONE_SETTINGS_CHANGE_URL) # URL self.assertEqual(call_args[0][1], {"param": "value"}) # query_dict - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def 
test_etag_headers_included(self, mock_worker_class, mock_request): """Test that etag is included in request headers.""" # Setup - first call sets etag - mock_response1 = OneSettingsResponse( - etag="test-etag", - refresh_interval=1800, - status_code=200 - ) + mock_response1 = OneSettingsResponse(etag="test-etag", refresh_interval=1800, status_code=200) mock_request.return_value = mock_response1 - + manager = _ConfigurationManager() manager.initialize() manager.get_configuration_and_refresh_interval() - + # Setup - second call should include etag - mock_response2 = OneSettingsResponse( - etag="new-etag", - refresh_interval=2400, - status_code=200 - ) + mock_response2 = OneSettingsResponse(etag="new-etag", refresh_interval=2400, status_code=200) mock_request.return_value = mock_response2 - + # Execute second call manager.get_configuration_and_refresh_interval() - + # Verify second call included etag in headers self.assertEqual(mock_request.call_count, 2) second_call_args = mock_request.call_args @@ -199,27 +190,19 @@ def test_etag_headers_included(self, mock_worker_class, mock_request): self.assertEqual(headers["If-None-Match"], "test-etag") self.assertEqual(headers["x-ms-onesetinterval"], "1800") - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_version_increase_triggers_config_fetch(self, mock_worker_class, mock_request): """Test that version increase triggers CONFIG endpoint fetch.""" manager = _ConfigurationManager() manager.initialize() - + # Mock responses for CHANGE and CONFIG endpoints change_response = OneSettingsResponse( - etag="test-etag", - refresh_interval=1800, - settings={"key": "value"}, - version=5, - status_code=200 - ) - 
config_response = OneSettingsResponse( - settings={"key": "config_value"}, - version=5, - status_code=200 + etag="test-etag", refresh_interval=1800, settings={"key": "value"}, version=5, status_code=200 ) - + config_response = OneSettingsResponse(settings={"key": "config_value"}, version=5, status_code=200) + # Configure mock to return different responses for different URLs def mock_request_side_effect(url, query_dict, headers=None): if url == _ONE_SETTINGS_CHANGE_URL: @@ -227,30 +210,30 @@ def mock_request_side_effect(url, query_dict, headers=None): elif url == _ONE_SETTINGS_CONFIG_URL: return config_response return OneSettingsResponse() - + mock_request.side_effect = mock_request_side_effect - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify both endpoints were called self.assertEqual(mock_request.call_count, 2) - + # Verify first call was to CHANGE endpoint first_call = mock_request.call_args_list[0] self.assertEqual(first_call[0][0], _ONE_SETTINGS_CHANGE_URL) - + # Verify second call was to CONFIG endpoint second_call = mock_request.call_args_list[1] self.assertEqual(second_call[0][0], _ONE_SETTINGS_CONFIG_URL) - + # Verify state was updated with CONFIG response self.assertEqual(manager.get_current_version(), 5) self.assertEqual(manager.get_settings(), {"key": "config_value"}) self.assertEqual(result, 1800) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_version_same_no_config_fetch(self, mock_worker_class, mock_request): """Test that same version does not trigger CONFIG fetch.""" manager = _ConfigurationManager() @@ -261,284 +244,263 @@ def test_version_same_no_config_fetch(self, mock_worker_class, 
mock_request): # First call to establish version cache first_response = OneSettingsResponse( - etag="first-etag", - refresh_interval=2500, - settings={"key": "first_value"}, - version=3, - status_code=200 + etag="first-etag", refresh_interval=2500, settings={"key": "first_value"}, version=3, status_code=200 ) mock_request.return_value = first_response result = manager.get_configuration_and_refresh_interval() - + # Verify that ALL calls were to CHANGE endpoint only all_calls = mock_request.call_args_list for call in all_calls: self.assertEqual(call[0][0], _ONE_SETTINGS_CHANGE_URL) - + self.assertEqual(result, 2500) - - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') - @patch('azure.monitor.opentelemetry.exporter._configuration.logger') + + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") + @patch("azure.monitor.opentelemetry.exporter._configuration.logger") def test_version_decrease_warning(self, mock_logger, mock_worker_class, mock_request): """Test warning when version decreases.""" manager = _ConfigurationManager() manager.initialize() - + # Set initial state with higher version manager._current_state.refresh_interval = 1800 manager._current_state.etag = "first_etag" manager._current_state.version_cache = 5 - + # Call with lower version lower_version_response = OneSettingsResponse( etag="second-etag", refresh_interval=2400, settings={"key": "second_value"}, version=3, # Lower version - status_code=200 + status_code=200, ) mock_request.return_value = lower_version_response - + # Execute call manager.get_configuration_and_refresh_interval() - + # Verify warning was logged mock_logger.warning.assert_called() warning_message = mock_logger.warning.call_args[0][0] self.assertIn("lower than cached version", 
warning_message) - + # Verify that ALL calls were to CHANGE endpoint only all_calls = mock_request.call_args_list for call in all_calls: self.assertEqual(call[0][0], _ONE_SETTINGS_CHANGE_URL) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_304_not_modified_response(self, mock_worker_class, mock_request): """Test handling of 304 Not Modified response.""" manager = _ConfigurationManager() manager.initialize() - + # First call to establish etag and state first_response = OneSettingsResponse( - etag="test-etag", - refresh_interval=1800, - settings={"key": "value"}, - version=2, - status_code=200 + etag="test-etag", refresh_interval=1800, settings={"key": "value"}, version=2, status_code=200 ) - config_response = OneSettingsResponse( - settings={"key": "config_value"}, - version=2, - status_code=200 - ) - + config_response = OneSettingsResponse(settings={"key": "config_value"}, version=2, status_code=200) + def first_call_side_effect(url, query_dict, headers=None): if url == _ONE_SETTINGS_CHANGE_URL: return first_response elif url == _ONE_SETTINGS_CONFIG_URL: return config_response return OneSettingsResponse() - + mock_request.side_effect = first_call_side_effect first_result = manager.get_configuration_and_refresh_interval() - + # Reset mock for 304 response mock_request.reset_mock() - + # Second call returns 304 Not Modified - not_modified_response = OneSettingsResponse( - status_code=304 - ) + not_modified_response = OneSettingsResponse(status_code=304) mock_request.return_value = not_modified_response - + # Execute second call second_result = manager.get_configuration_and_refresh_interval() - + # Verify 304 response preserves previous refresh 
interval (both should be 1800) self.assertEqual(first_result, 1800) self.assertEqual(second_result, 1800) # Should preserve the original interval - + # Verify only CHANGE endpoint was called mock_request.assert_called_once() call_args = mock_request.call_args self.assertEqual(call_args[0][0], _ONE_SETTINGS_CHANGE_URL) - + # Verify etag was included in headers headers = call_args[0][2] self.assertEqual(headers["If-None-Match"], "test-etag") - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') - @patch('azure.monitor.opentelemetry.exporter._configuration.logger') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") + @patch("azure.monitor.opentelemetry.exporter._configuration.logger") def test_transient_error_timeout(self, mock_logger, mock_worker_class, mock_request): """Test transient error handling for timeout.""" manager = _ConfigurationManager() manager.initialize() - + # Set initial refresh interval with manager._state_lock: manager._current_state = manager._current_state.with_updates(refresh_interval=1800) - + # Setup timeout response timeout_response = OneSettingsResponse( - has_exception=True, - status_code=200 # Default status when exception occurs + has_exception=True, status_code=200 # Default status when exception occurs ) mock_request.return_value = timeout_response - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify refresh interval was doubled self.assertEqual(result, 3600) # 1800 * 2 - + # Verify only CHANGE endpoint was called (no CONFIG fetch on error) mock_request.assert_called_once() call_args = mock_request.call_args self.assertEqual(call_args[0][0], _ONE_SETTINGS_CHANGE_URL) - + # Verify warning was logged mock_logger.warning.assert_called() warning_message = 
mock_logger.warning.call_args[0][0] self.assertIn("transient error", warning_message) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') - @patch('azure.monitor.opentelemetry.exporter._configuration.logger') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") + @patch("azure.monitor.opentelemetry.exporter._configuration.logger") def test_transient_error_network_exception(self, mock_logger, mock_worker_class, mock_request): """Test transient error handling for network exception.""" manager = _ConfigurationManager() manager.initialize() - + # Set initial refresh interval with manager._state_lock: manager._current_state = manager._current_state.with_updates(refresh_interval=900) - + # Setup network exception response - exception_response = OneSettingsResponse( - has_exception=True, - status_code=200 - ) + exception_response = OneSettingsResponse(has_exception=True, status_code=200) mock_request.return_value = exception_response - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify refresh interval was doubled self.assertEqual(result, 1800) # 900 * 2 - + # Verify warning was logged with correct error type mock_logger.warning.assert_called() warning_message = mock_logger.warning.call_args[0][0] self.assertIn("transient error", warning_message) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') - @patch('azure.monitor.opentelemetry.exporter._configuration.logger') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") + 
@patch("azure.monitor.opentelemetry.exporter._configuration.logger") def test_transient_error_http_status_codes(self, mock_logger, mock_worker_class, mock_request): """Test transient error handling for various HTTP status codes.""" manager = _ConfigurationManager() manager.initialize() - + # Test various retryable status codes test_cases = [429, 500, 502, 503, 504, 408, 401, 403] - + for status_code in test_cases: with self.subTest(status_code=status_code): # Reset mock and set initial refresh interval mock_request.reset_mock() mock_logger.reset_mock() - + with manager._state_lock: manager._current_state = manager._current_state.with_updates(refresh_interval=1200) - + # Setup HTTP error response http_error_response = OneSettingsResponse( status_code=status_code, has_exception=False, ) mock_request.return_value = http_error_response - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify refresh interval was doubled self.assertEqual(result, 2400) # 1200 * 2 - + # Verify warning was logged with correct status code mock_logger.warning.assert_called() warning_message = mock_logger.warning.call_args[0][0] self.assertIn("transient error", warning_message) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') - @patch('azure.monitor.opentelemetry.exporter._configuration.logger') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") + @patch("azure.monitor.opentelemetry.exporter._configuration.logger") def test_transient_error_refresh_interval_cap(self, mock_logger, mock_worker_class, mock_request): """Test that refresh interval is capped at 24 hours for transient errors.""" manager = _ConfigurationManager() manager.initialize() - + # Set initial refresh interval to a high value that would 
exceed cap when doubled high_refresh_interval = _ONE_SETTINGS_MAX_REFRESH_INTERVAL_SECONDS // 2 + 1000 # Will exceed cap when doubled - + with manager._state_lock: manager._current_state = manager._current_state.with_updates(refresh_interval=high_refresh_interval) - + # Setup timeout response - timeout_response = OneSettingsResponse( - has_exception=True, - status_code=200 - ) + timeout_response = OneSettingsResponse(has_exception=True, status_code=200) mock_request.return_value = timeout_response - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify refresh interval was capped at 24 hours self.assertEqual(result, _ONE_SETTINGS_MAX_REFRESH_INTERVAL_SECONDS) - + # Verify warning was logged mentioning transient error mock_logger.warning.assert_called() warning_message = mock_logger.warning.call_args[0][0] self.assertIn("transient", warning_message) - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_non_transient_error_no_backoff(self, mock_worker_class, mock_request): """Test that non-transient errors don't trigger backoff.""" manager = _ConfigurationManager() manager.initialize() - + # Set initial refresh interval with manager._state_lock: manager._current_state = manager._current_state.with_updates(refresh_interval=1800) - + # Setup non-retryable HTTP error response (e.g., 400 Bad Request) bad_request_response = OneSettingsResponse( status_code=400, # Not in _RETRYABLE_STATUS_CODES has_exception=False, - refresh_interval=1800 # Should remain unchanged + refresh_interval=1800, # Should remain unchanged ) mock_request.return_value = bad_request_response - + # Execute result = 
manager.get_configuration_and_refresh_interval() # Verify refresh interval was updated to response value self.assertEqual(result, 1800) - + # Verify only CHANGE endpoint was called mock_request.assert_called_once() - @patch('azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request') - @patch('azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker') + @patch("azure.monitor.opentelemetry.exporter._configuration.make_onesettings_request") + @patch("azure.monitor.opentelemetry.exporter._configuration._worker._ConfigurationWorker") def test_successful_request_after_transient_error(self, mock_worker_class, mock_request): """Test that successful requests don't double refresh interval.""" manager = _ConfigurationManager() manager.initialize() - + # Setup successful response success_response = OneSettingsResponse( etag="test-etag", @@ -549,17 +511,17 @@ def test_successful_request_after_transient_error(self, mock_worker_class, mock_ has_exception=False, ) mock_request.return_value = success_response - + # Execute result = manager.get_configuration_and_refresh_interval() - + # Verify refresh interval is not doubled (uses response value) self.assertEqual(result, 1800) @patch.dict(os.environ, {"APPLICATIONINSIGHTS_CONTROLPLANE_DISABLED": "true"}) def test_configuration_manager_disabled(self): """Test that configuration manager is disabled when environment variable is set.""" - + # When controlplane is disabled, get_configuration_manager should return None manager = get_configuration_manager() self.assertIsNone(manager) @@ -567,7 +529,7 @@ def test_configuration_manager_disabled(self): @patch.dict(os.environ, {"APPLICATIONINSIGHTS_CONTROLPLANE_DISABLED": "false"}) def test_configuration_manager_enabled(self): """Test that configuration manager is enabled when environment variable is false.""" - + # When controlplane is not disabled, get_configuration_manager should return instance manager = get_configuration_manager() 
self.assertIsNotNone(manager) @@ -575,16 +537,16 @@ def test_configuration_manager_enabled(self): def test_configuration_manager_enabled_by_default(self): """Test that configuration manager is enabled by default when no environment variable is set.""" - + # Ensure env var is not set if "APPLICATIONINSIGHTS_CONTROLPLANE_DISABLED" in os.environ: del os.environ["APPLICATIONINSIGHTS_CONTROLPLANE_DISABLED"] - + # When no env var is set, get_configuration_manager should return instance manager = get_configuration_manager() self.assertIsNotNone(manager) self.assertIsInstance(manager, _ConfigurationManager) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_utils.py index c97a8a984562..4d97f41a62eb 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_utils.py @@ -36,15 +36,8 @@ def setUp(self): def test_fill_empty_profile(self): """Test filling an empty profile with all parameters.""" - _ConfigurationProfile.fill( - os="w", - rp="f", - attach="m", - version="1.0.0", - component="ext", - region="westus" - ) - + _ConfigurationProfile.fill(os="w", rp="f", attach="m", version="1.0.0", component="ext", region="westus") + self.assertEqual(_ConfigurationProfile.os, "w") self.assertEqual(_ConfigurationProfile.rp, "f") self.assertEqual(_ConfigurationProfile.attach, "m") @@ -55,7 +48,7 @@ def test_fill_empty_profile(self): def test_fill_partial_profile(self): """Test filling profile with only some parameters.""" _ConfigurationProfile.fill(os="l", version="2.0.0") - + self.assertEqual(_ConfigurationProfile.os, "l") self.assertEqual(_ConfigurationProfile.version, "2.0.0") self.assertEqual(_ConfigurationProfile.rp, "") @@ -68,10 +61,10 @@ def test_fill_no_overwrite(self): # 
Set initial values _ConfigurationProfile.os = "w" _ConfigurationProfile.version = "1.0.0" - + # Try to overwrite - should be ignored _ConfigurationProfile.fill(os="l", version="2.0.0", rp="f") - + # Original values should be preserved self.assertEqual(_ConfigurationProfile.os, "w") self.assertEqual(_ConfigurationProfile.version, "1.0.0") @@ -85,7 +78,7 @@ class TestOneSettingsResponse(unittest.TestCase): def test_default_initialization(self): """Test OneSettingsResponse with default values.""" response = OneSettingsResponse() - + self.assertIsNone(response.etag) self.assertEqual(response.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(response.settings, {}) @@ -97,13 +90,9 @@ def test_custom_initialization(self): """Test OneSettingsResponse with custom values.""" settings = {"key": "value"} response = OneSettingsResponse( - etag="test-etag", - refresh_interval=3600, - settings=settings, - version=5, - status_code=304 + etag="test-etag", refresh_interval=3600, settings=settings, version=5, status_code=304 ) - + self.assertEqual(response.etag, "test-etag") self.assertEqual(response.refresh_interval, 3600) self.assertEqual(response.settings, settings) @@ -113,11 +102,8 @@ def test_custom_initialization(self): def test_exception_initialization(self): """Test OneSettingsResponse with exception indicator.""" - response = OneSettingsResponse( - has_exception=True, - status_code=500 - ) - + response = OneSettingsResponse(has_exception=True, status_code=500) + self.assertIsNone(response.etag) self.assertEqual(response.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(response.settings, {}) @@ -127,10 +113,8 @@ def test_exception_initialization(self): def test_timeout_initialization(self): """Test OneSettingsResponse with timeout indicator.""" - response = OneSettingsResponse( - has_exception=True - ) - + response = OneSettingsResponse(has_exception=True) + self.assertIsNone(response.etag) 
self.assertEqual(response.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(response.settings, {}) @@ -144,7 +128,7 @@ def test_all_error_indicators(self): status_code=408, has_exception=True, ) - + self.assertEqual(response.status_code, 408) self.assertTrue(response.has_exception) @@ -152,32 +136,26 @@ def test_all_error_indicators(self): class TestMakeOneSettingsRequest(unittest.TestCase): """Test cases for make_onesettings_request function.""" - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_successful_request(self, mock_get): """Test successful OneSettings request.""" # Setup mock response mock_response = Mock() mock_response.status_code = 200 - mock_response.headers = { - "ETag": "test-etag", - "x-ms-onesetinterval": "30" - } - mock_response.content = json.dumps({ - "settings": {"key": "value", _ONE_SETTINGS_CHANGE_VERSION_KEY: "5"} - }).encode('utf-8') + mock_response.headers = {"ETag": "test-etag", "x-ms-onesetinterval": "30"} + mock_response.content = json.dumps( + {"settings": {"key": "value", _ONE_SETTINGS_CHANGE_VERSION_KEY: "5"}} + ).encode("utf-8") mock_get.return_value = mock_response - + # Make request result = make_onesettings_request("http://test.com", {"param": "value"}, {"header": "value"}) - + # Verify request was made correctly mock_get.assert_called_once_with( - "http://test.com", - params={"param": "value"}, - headers={"header": "value"}, - timeout=10 + "http://test.com", params={"param": "value"}, headers={"header": "value"}, timeout=10 ) - + # Verify response self.assertEqual(result.etag, "test-etag") self.assertEqual(result.refresh_interval, 1800) # 30 minutes * 60 @@ -186,13 +164,13 @@ def test_successful_request(self, mock_get): self.assertEqual(result.status_code, 200) self.assertFalse(result.has_exception) - 
@patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_request_timeout_exception(self, mock_get): """Test OneSettings request with timeout exception.""" mock_get.side_effect = requests.exceptions.Timeout("Request timed out") - + result = make_onesettings_request("http://test.com") - + # Should return response with timeout and exception indicators self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -201,13 +179,13 @@ def test_request_timeout_exception(self, mock_get): self.assertEqual(result.status_code, 200) self.assertTrue(result.has_exception) - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_request_connection_exception(self, mock_get): """Test OneSettings request with connection exception.""" mock_get.side_effect = requests.exceptions.ConnectionError("Connection failed") - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator but no timeout self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -216,13 +194,13 @@ def test_request_connection_exception(self, mock_get): self.assertEqual(result.status_code, 200) self.assertTrue(result.has_exception) - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_request_http_exception(self, mock_get): """Test OneSettings request with HTTP exception.""" mock_get.side_effect = requests.exceptions.HTTPError("HTTP 500 Error") - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator self.assertIsNone(result.etag) 
self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -231,13 +209,13 @@ def test_request_http_exception(self, mock_get): self.assertEqual(result.status_code, 200) self.assertTrue(result.has_exception) - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_request_generic_exception(self, mock_get): """Test OneSettings request with generic exception.""" mock_get.side_effect = Exception("Unexpected error") - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -246,8 +224,8 @@ def test_request_generic_exception(self, mock_get): self.assertEqual(result.status_code, 200) self.assertTrue(result.has_exception) - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') - @patch('azure.monitor.opentelemetry.exporter._configuration._utils._parse_onesettings_response') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") + @patch("azure.monitor.opentelemetry.exporter._configuration._utils._parse_onesettings_response") def test_json_decode_exception(self, mock_parse, mock_get): """Test OneSettings request with JSON decode exception.""" # Setup mock response @@ -256,13 +234,14 @@ def test_json_decode_exception(self, mock_parse, mock_get): mock_response.headers = {"ETag": "test-etag"} mock_response.content = b"invalid json content" mock_get.return_value = mock_response - + # Mock _parse_onesettings_response to raise JSONDecodeError from json import JSONDecodeError + mock_parse.side_effect = JSONDecodeError("Expecting value", "invalid json content", 0) - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator 
self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -271,33 +250,32 @@ def test_json_decode_exception(self, mock_parse, mock_get): self.assertEqual(result.status_code, 200) self.assertTrue(result.has_exception) - - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_http_error_status_codes(self, mock_get): """Test OneSettings request with various HTTP error status codes.""" # Test different HTTP error codes error_codes = [400, 401, 403, 404, 429, 500, 502, 503, 504] - + for status_code in error_codes: with self.subTest(status_code=status_code): mock_response = Mock() mock_response.status_code = status_code mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(f"HTTP {status_code}") mock_get.return_value = mock_response - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator self.assertTrue(result.has_exception) self.assertEqual(result.status_code, 200) # Default status when exception occurs - @patch('azure.monitor.opentelemetry.exporter._configuration._utils.requests.get') + @patch("azure.monitor.opentelemetry.exporter._configuration._utils.requests.get") def test_request_exception_legacy(self, mock_get): """Test OneSettings request with network exception (legacy behavior test).""" mock_get.side_effect = requests.exceptions.RequestException("Network error") - + result = make_onesettings_request("http://test.com") - + # Should return response with exception indicator self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) @@ -314,16 +292,13 @@ def test_parse_200_response(self): """Test parsing successful 200 response.""" mock_response = Mock() mock_response.status_code = 200 - mock_response.headers = { - "ETag": "test-etag", - 
"x-ms-onesetinterval": "45" - } - mock_response.content = json.dumps({ - "settings": {"feature": "enabled", _ONE_SETTINGS_CHANGE_VERSION_KEY: "10"} - }).encode('utf-8') - + mock_response.headers = {"ETag": "test-etag", "x-ms-onesetinterval": "45"} + mock_response.content = json.dumps( + {"settings": {"feature": "enabled", _ONE_SETTINGS_CHANGE_VERSION_KEY: "10"}} + ).encode("utf-8") + result = _parse_onesettings_response(mock_response) - + self.assertEqual(result.etag, "test-etag") self.assertEqual(result.refresh_interval, 2700) # 45 minutes * 60 self.assertEqual(result.settings, {"feature": "enabled", _ONE_SETTINGS_CHANGE_VERSION_KEY: "10"}) @@ -334,14 +309,11 @@ def test_parse_304_response(self): """Test parsing 304 Not Modified response.""" mock_response = Mock() mock_response.status_code = 304 - mock_response.headers = { - "ETag": "cached-etag", - "x-ms-onesetinterval": "60" - } + mock_response.headers = {"ETag": "cached-etag", "x-ms-onesetinterval": "60"} mock_response.content = b"" - + result = _parse_onesettings_response(mock_response) - + self.assertEqual(result.etag, "cached-etag") self.assertEqual(result.refresh_interval, 3600) # 60 minutes * 60 self.assertEqual(result.settings, {}) @@ -354,9 +326,9 @@ def test_parse_invalid_json(self): mock_response.status_code = 200 mock_response.headers = {} mock_response.content = b"invalid json" - + result = _parse_onesettings_response(mock_response) - + self.assertIsNone(result.etag) self.assertEqual(result.refresh_interval, _ONE_SETTINGS_DEFAULT_REFRESH_INTERVAL_SECONDS) self.assertEqual(result.settings, {}) @@ -387,51 +359,29 @@ def tearDown(self): def test_feature_enabled_by_default(self): """Test feature that is enabled by default with no overrides.""" - settings = { - "test_feature": { - "default": "enabled" - } - } - + settings = {"test_feature": {"default": "enabled"}} + result = evaluate_feature("test_feature", settings) self.assertTrue(result) def test_feature_disabled_by_default(self): """Test feature that 
is disabled by default with no overrides.""" - settings = { - "test_feature": { - "default": "disabled" - } - } - + settings = {"test_feature": {"default": "disabled"}} + result = evaluate_feature("test_feature", settings) self.assertFalse(result) def test_feature_override_matches(self): """Test feature override that matches current profile.""" - settings = { - "test_feature": { - "default": "disabled", - "override": [ - {"os": "w", "component": "ext"} - ] - } - } - + settings = {"test_feature": {"default": "disabled", "override": [{"os": "w", "component": "ext"}]}} + result = evaluate_feature("test_feature", settings) self.assertTrue(result) # Override flips disabled to enabled def test_feature_override_no_match(self): """Test feature override that doesn't match current profile.""" - settings = { - "test_feature": { - "default": "enabled", - "override": [ - {"os": "l", "component": "dst"} - ] - } - } - + settings = {"test_feature": {"default": "enabled", "override": [{"os": "l", "component": "dst"}]}} + result = evaluate_feature("test_feature", settings) self.assertTrue(result) # No override, stays default @@ -443,11 +393,11 @@ def test_feature_multiple_overrides(self): "override": [ {"os": "l"}, # Doesn't match {"component": "ext", "rp": "f"}, # Matches - {"region": "eastus"} # Doesn't match - ] + {"region": "eastus"}, # Doesn't match + ], } } - + result = evaluate_feature("test_feature", settings) self.assertTrue(result) # Second override matches @@ -455,13 +405,13 @@ def test_invalid_inputs(self): """Test evaluate_feature with invalid inputs.""" # Empty feature key self.assertIsNone(evaluate_feature("", {})) - + # None settings self.assertIsNone(evaluate_feature("test", None)) - + # Feature not in settings self.assertIsNone(evaluate_feature("missing", {})) - + # Invalid feature config self.assertIsNone(evaluate_feature("test", {"test": "invalid"})) @@ -586,11 +536,11 @@ def test_basic_version_comparison(self): # Greater than 
self.assertTrue(_compare_versions("2.0.0", "1.0.0", ">=")) self.assertTrue(_compare_versions("1.1.0", "1.0.0", ">")) - + # Less than self.assertTrue(_compare_versions("1.0.0", "2.0.0", "<=")) self.assertTrue(_compare_versions("1.0.0", "1.1.0", "<")) - + # Equal self.assertTrue(_compare_versions("1.0.0", "1.0.0", "==")) self.assertTrue(_compare_versions("1.0.0", "1.0.0", ">=")) @@ -601,7 +551,7 @@ def test_beta_version_comparison(self): # Beta vs beta self.assertTrue(_compare_versions("1.0.0b2", "1.0.0b1", ">")) self.assertTrue(_compare_versions("1.0.0b1", "1.0.0b2", "<")) - + # Beta vs release self.assertTrue(_compare_versions("1.0.0", "1.0.0b1", ">")) self.assertTrue(_compare_versions("1.0.0b1", "1.0.0", "<")) @@ -619,7 +569,7 @@ class TestParseVersionWithBeta(unittest.TestCase): def test_release_version(self): """Test parsing release version.""" result = _parse_version_with_beta("1.2.3") - self.assertEqual(result, (1, 2, 3, float('inf'))) + self.assertEqual(result, (1, 2, 3, float("inf"))) def test_beta_version(self): """Test parsing beta version.""" @@ -638,11 +588,8 @@ class TestOneSettingsResponseErrorHandling(unittest.TestCase): def test_response_with_timeout_only(self): """Test response that indicates timeout but not general exception.""" # This scenario shouldn't normally happen but test for completeness - response = OneSettingsResponse( - has_exception=False, - status_code=408 - ) - + response = OneSettingsResponse(has_exception=False, status_code=408) + self.assertFalse(response.has_exception) self.assertEqual(response.status_code, 408) @@ -655,13 +602,10 @@ def test_response_error_combinations(self): (True, False, 408, "timeout without exception flag"), (False, False, 429, "no error flags but error status"), ] - + for has_timeout, has_exception, status_code, description in test_cases: with self.subTest(description=description): - response = OneSettingsResponse( - has_exception=has_exception, - status_code=status_code - ) + response = 
OneSettingsResponse(has_exception=has_exception, status_code=status_code) self.assertEqual(response.has_exception, has_exception) self.assertEqual(response.status_code, status_code) @@ -695,33 +639,33 @@ def test_complex_feature_evaluation(self): "override": [ {"os": "w"}, # This should match {"os": "l", "ver": {"min": "1.0.0b20"}}, - {"component": "dst", "rp": "f"} - ] + {"component": "dst", "rp": "f"}, + ], }, "sampling": { "default": "enabled", "override": [ {"os": ["w", "l"]}, # This should match and disable - ] + ], }, "profiling": { "default": "disabled", "override": [ {"os": "w", "ver": {"min": "2.0.0", "max": "3.0.0"}}, # Version doesn't match - {"component": "ext", "rp": ["f", "a"], "region": ["westus", "eastus"]} # All match - ] - } + {"component": "ext", "rp": ["f", "a"], "region": ["westus", "eastus"]}, # All match + ], + }, } - + # live_metrics: disabled by default, but Windows override matches self.assertTrue(evaluate_feature("live_metrics", settings)) - + # sampling: enabled by default, but OS override matches to disable self.assertFalse(evaluate_feature("sampling", settings)) - + # profiling: disabled by default, second override matches to enable self.assertTrue(evaluate_feature("profiling", settings)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_worker.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_worker.py index 3787d21f55f5..8b2198e37d44 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_worker.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/configuration/test_worker.py @@ -25,33 +25,32 @@ def tearDown(self): def test_init_with_default_refresh_interval(self): """Test worker initialization with default refresh interval.""" - with patch('random.uniform', return_value=0.1): # Short startup delay for tests + with patch("random.uniform", return_value=0.1): 
# Short startup delay for tests worker = _ConfigurationWorker(self.mock_configuration_manager) - + try: # Verify initial state self.assertEqual(worker._configuration_manager, self.mock_configuration_manager) self.assertEqual(worker._default_refresh_interval, 3600) self.assertEqual(worker._refresh_interval, 3600) self.assertTrue(worker._running) - + # Verify thread was created and started self.assertIsNotNone(worker._refresh_thread) self.assertTrue(worker._refresh_thread.is_alive()) self.assertTrue(worker._refresh_thread.daemon) self.assertEqual(worker._refresh_thread.name, "ConfigurationWorker") - finally: worker.shutdown() def test_init_with_custom_refresh_interval(self): """Test worker initialization with custom refresh interval.""" custom_interval = 900 - - with patch('random.uniform', return_value=0.1): + + with patch("random.uniform", return_value=0.1): worker = _ConfigurationWorker(self.mock_configuration_manager, custom_interval) - + try: self.assertEqual(worker._refresh_interval, custom_interval) self.assertEqual(worker.get_refresh_interval(), custom_interval) @@ -60,38 +59,38 @@ def test_init_with_custom_refresh_interval(self): def test_get_refresh_interval_thread_safe(self): """Test that get_refresh_interval is thread-safe.""" - with patch('random.uniform', return_value=0.1): + with patch("random.uniform", return_value=0.1): worker = _ConfigurationWorker(self.mock_configuration_manager, 1200) - + try: # Test from multiple threads results = [] - + def get_interval(): results.append(worker.get_refresh_interval()) - + threads = [threading.Thread(target=get_interval) for _ in range(10)] - + for thread in threads: thread.start() - + for thread in threads: thread.join() - + # All results should be the same self.assertEqual(len(set(results)), 1) self.assertEqual(results[0], 1200) - + finally: worker.shutdown() - @patch('random.uniform') + @patch("random.uniform") def test_startup_delay_range(self, mock_random): """Test that startup delay is applied with correct 
range.""" mock_random.return_value = 7.5 # Middle of range - + worker = _ConfigurationWorker(self.mock_configuration_manager) - + try: # Verify random.uniform was called with correct range mock_random.assert_called_once_with(5.0, 15.0) @@ -100,24 +99,24 @@ def test_startup_delay_range(self, mock_random): def test_configuration_refresh_called(self): """Test that configuration refresh is called with correct parameters.""" - with patch('random.uniform', return_value=0.001): # Very short delay + with patch("random.uniform", return_value=0.001): # Very short delay worker = _ConfigurationWorker(self.mock_configuration_manager, 0.01) # Very short interval - + try: # Wait for at least one refresh cycle with timeout max_wait = 1.0 # Maximum 1 second wait start_time = time.time() - + while time.time() - start_time < max_wait: if self.mock_configuration_manager.get_configuration_and_refresh_interval.called: break time.sleep(0.01) - + # Verify the configuration manager was called self.mock_configuration_manager.get_configuration_and_refresh_interval.assert_called_with( _ONE_SETTINGS_PYTHON_TARGETING ) - + finally: worker.shutdown() @@ -125,146 +124,146 @@ def test_refresh_interval_update(self): """Test that refresh interval is updated from configuration manager response.""" # Mock returns different intervals self.mock_configuration_manager.get_configuration_and_refresh_interval.side_effect = [1800, 3600] - - with patch('random.uniform', return_value=0.001): + + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager, 0.01) - + try: # Wait for refresh cycles with timeout max_wait = 1.0 start_time = time.time() - + while time.time() - start_time < max_wait: if self.mock_configuration_manager.get_configuration_and_refresh_interval.call_count >= 1: break time.sleep(0.01) - + # Should have updated to the new interval current_interval = worker.get_refresh_interval() self.assertIn(current_interval, [1800, 3600]) # Could be 
either depending on timing - + finally: worker.shutdown() - @patch('azure.monitor.opentelemetry.exporter._configuration._worker.logger') + @patch("azure.monitor.opentelemetry.exporter._configuration._worker.logger") def test_exception_handling_in_refresh_loop(self, mock_logger): """Test that exceptions in refresh loop are handled gracefully.""" # Make the configuration manager raise an exception self.mock_configuration_manager.get_configuration_and_refresh_interval.side_effect = Exception("Test error") - - with patch('random.uniform', return_value=0.001): + + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager, 0.01) - + try: # Wait for refresh cycles with timeout max_wait = 1.0 start_time = time.time() - + while time.time() - start_time < max_wait: if mock_logger.warning.called: break time.sleep(0.01) - + # Worker should still be running despite exception self.assertTrue(worker._running) self.assertTrue(worker._refresh_thread.is_alive()) - + # Error should be logged mock_logger.warning.assert_called() warning_call = mock_logger.warning.call_args[0] self.assertIn("Configuration refresh failed", warning_call[0]) self.assertIn("Test error", str(warning_call[1])) - + finally: worker.shutdown() def test_shutdown_graceful(self): """Test graceful shutdown of worker.""" - with patch('random.uniform', return_value=0.001): + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager) - + # Verify worker is running self.assertTrue(worker._running) self.assertTrue(worker._refresh_thread.is_alive()) - + # Shutdown with timeout start_time = time.time() worker.shutdown() shutdown_time = time.time() - start_time - + # Verify shutdown state self.assertFalse(worker._running) self.assertTrue(worker._shutdown_event.is_set()) - + # Shutdown should be reasonably fast self.assertLess(shutdown_time, 2.0) # Should not take more than 2 seconds - + # Thread should be stopped 
self.assertFalse(worker._refresh_thread.is_alive()) def test_shutdown_idempotent(self): """Test that shutdown can be called multiple times safely.""" - with patch('random.uniform', return_value=0.01): + with patch("random.uniform", return_value=0.01): worker = _ConfigurationWorker(self.mock_configuration_manager) - + # First shutdown worker.shutdown() self.assertFalse(worker._running) - + # Second shutdown should not cause errors try: worker.shutdown() except Exception as e: self.fail(f"Second shutdown raised an exception: {e}") - + # State should remain shutdown self.assertFalse(worker._running) def test_shutdown_during_startup_delay(self): """Test shutdown during startup delay period.""" - with patch('random.uniform', return_value=1.0): # 1 second startup delay + with patch("random.uniform", return_value=1.0): # 1 second startup delay worker = _ConfigurationWorker(self.mock_configuration_manager) - + # Shutdown immediately with timeout start_time = time.time() worker.shutdown() shutdown_time = time.time() - start_time - + # Should shutdown cleanly even during startup delay self.assertFalse(worker._running) - + # Shutdown should be reasonably fast (much less than the startup delay) self.assertLess(shutdown_time, 0.5) # Should be much faster than 1 second startup delay - + # Thread should be stopped self.assertFalse(worker._refresh_thread.is_alive()) - + # Configuration manager should not have been called self.mock_configuration_manager.get_configuration_and_refresh_interval.assert_not_called() def test_shutdown_thread_safety(self): """Test that shutdown is thread-safe.""" - with patch('random.uniform', return_value=0.01): + with patch("random.uniform", return_value=0.01): worker = _ConfigurationWorker(self.mock_configuration_manager) - + # Shutdown from multiple threads shutdown_results = [] - + def shutdown_worker(): try: worker.shutdown() shutdown_results.append("success") except Exception as e: shutdown_results.append(f"error: {e}") - + threads = 
[threading.Thread(target=shutdown_worker) for _ in range(5)] - + for thread in threads: thread.start() - + for thread in threads: thread.join() - + # All shutdowns should succeed self.assertEqual(len(shutdown_results), 5) self.assertTrue(all(result == "success" for result in shutdown_results)) @@ -272,80 +271,78 @@ def shutdown_worker(): def test_daemon_thread_property(self): """Test that the worker thread is created as a daemon thread.""" - with patch('random.uniform', return_value=0.01): + with patch("random.uniform", return_value=0.01): worker = _ConfigurationWorker(self.mock_configuration_manager) - + try: # Verify thread is daemon self.assertTrue(worker._refresh_thread.daemon) finally: worker.shutdown() - @patch('threading.Thread') + @patch("threading.Thread") def test_thread_target_and_name(self, mock_thread_class): """Test that thread is created with correct target and name.""" mock_thread_instance = Mock() mock_thread_instance.is_alive.return_value = False # Simulate thread that doesn't start mock_thread_class.return_value = mock_thread_instance - - with patch('random.uniform', return_value=0.001): + + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager) - + try: # Verify thread was created with correct parameters mock_thread_class.assert_called_once_with( - target=worker._get_configuration, - name="ConfigurationWorker", - daemon=True + target=worker._get_configuration, name="ConfigurationWorker", daemon=True ) - + # Verify thread was started mock_thread_instance.start.assert_called_once() - + finally: # Call shutdown to clean up, even though thread is mocked worker.shutdown() def test_configuration_targeting_parameter(self): """Test that the correct targeting parameter is passed.""" - with patch('random.uniform', return_value=0.001): + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager, 0.01) - + try: # Wait for refresh with timeout 
max_wait = 1.0 start_time = time.time() - + while time.time() - start_time < max_wait: if self.mock_configuration_manager.get_configuration_and_refresh_interval.called: break time.sleep(0.01) - + # Verify correct parameter was passed self.mock_configuration_manager.get_configuration_and_refresh_interval.assert_called_with( _ONE_SETTINGS_PYTHON_TARGETING ) - + finally: worker.shutdown() def test_lock_protects_worker_state(self): """Test that single lock properly protects worker state access.""" - with patch('random.uniform', return_value=0.01): + with patch("random.uniform", return_value=0.01): worker = _ConfigurationWorker(self.mock_configuration_manager, 1000) - + try: # Test that get_refresh_interval works normally interval = worker.get_refresh_interval() self.assertEqual(interval, 1000) - + # Test thread safety by accessing from current thread # (We can't easily test cross-thread locking without risking deadlock) with worker._lock: # While holding lock, verify we can still access internal state self.assertEqual(worker._refresh_interval, 1000) self.assertTrue(worker._running) - + finally: worker.shutdown() @@ -356,29 +353,29 @@ def test_refresh_loop_continues_after_exception(self): Exception("First error"), 1500, Exception("Second error"), - 2000 + 2000, ] - - with patch('random.uniform', return_value=0.001): + + with patch("random.uniform", return_value=0.001): worker = _ConfigurationWorker(self.mock_configuration_manager, 0.005) - + try: # Wait for multiple refresh cycles with timeout max_wait = 1.0 start_time = time.time() - + while time.time() - start_time < max_wait: if self.mock_configuration_manager.get_configuration_and_refresh_interval.call_count >= 2: break time.sleep(0.01) - + # Should have called multiple times despite exceptions call_count = self.mock_configuration_manager.get_configuration_and_refresh_interval.call_count self.assertGreaterEqual(call_count, 1) # At least one call should have happened - + finally: worker.shutdown() -if __name__ == 
'__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_customer_sdkstats.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_customer_sdkstats.py index 34a09289b630..1044668bb2a5 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_customer_sdkstats.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_customer_sdkstats.py @@ -24,7 +24,7 @@ def setUp(self): """Set up test environment and ensure customer SDKStats is enabled.""" # Enable customer SDK stats for testing os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "true" - + # Reset the customer stats manager for each test manager = get_customer_stats_manager() manager.shutdown() @@ -33,7 +33,7 @@ def tearDown(self): """Clean up test environment.""" # Clean up environment variables os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW, None) - + # Shutdown customer stats manager = get_customer_stats_manager() manager.shutdown() @@ -43,10 +43,10 @@ def test_collect_customer_sdkstats(self): # Create a mock exporter mock_exporter = mock.Mock() mock_exporter._connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" - + # Collect customer SDK stats collect_customer_sdkstats(mock_exporter) - + # Verify manager is initialized manager = get_customer_stats_manager() self.assertTrue(manager.is_initialized) @@ -56,12 +56,12 @@ def test_collect_customer_sdkstats_multiple_calls(self): # Create a mock exporter mock_exporter = mock.Mock() mock_exporter._connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" - + # Call collect multiple times collect_customer_sdkstats(mock_exporter) collect_customer_sdkstats(mock_exporter) collect_customer_sdkstats(mock_exporter) - + # Verify manager is still properly initialized manager = get_customer_stats_manager() 
self.assertTrue(manager.is_initialized) @@ -72,10 +72,10 @@ def test_shutdown_customer_sdkstats_metrics(self): manager = get_customer_stats_manager() manager.initialize("InstrumentationKey=12345678-1234-5678-abcd-12345678abcd") self.assertTrue(manager.is_initialized) - + # Shutdown shutdown_customer_sdkstats_metrics() - + # Verify shutdown self.assertFalse(manager.is_initialized) @@ -85,10 +85,10 @@ def test_shutdown_customer_sdkstats_metrics_when_not_initialized(self): manager = get_customer_stats_manager() manager.shutdown() self.assertFalse(manager.is_initialized) - + # Shutdown when already shut down should not cause issues shutdown_customer_sdkstats_metrics() - + # Verify still shut down self.assertFalse(manager.is_initialized) @@ -97,24 +97,24 @@ def test_collect_and_shutdown_cycle(self): # Create a mock exporter mock_exporter = mock.Mock() mock_exporter._connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" - + # Collect customer SDK stats collect_customer_sdkstats(mock_exporter) manager = get_customer_stats_manager() self.assertTrue(manager.is_initialized) - + # Shutdown shutdown_customer_sdkstats_metrics() self.assertFalse(manager.is_initialized) - + # Collect again collect_customer_sdkstats(mock_exporter) self.assertTrue(manager.is_initialized) - + # Final shutdown shutdown_customer_sdkstats_metrics() self.assertFalse(manager.is_initialized) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_manager.py index 8de10ff2c690..07df020d4946 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_manager.py @@ -32,11 +32,11 @@ def setUp(self): """Set up test environment.""" # Enable customer SDK stats for 
testing os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "true" - + # Reset singleton state - only clear CustomerSdkStatsManager instances if CustomerSdkStatsManager in CustomerSdkStatsManager._instances: del CustomerSdkStatsManager._instances[CustomerSdkStatsManager] - + # Get a fresh manager instance self.manager = CustomerSdkStatsManager() @@ -44,13 +44,13 @@ def tearDown(self): """Clean up test environment.""" # Clean up environment variables os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW, None) - + # Shutdown manager if needed try: self.manager.shutdown() except: pass - + # Reset singleton state - only clear CustomerSdkStatsManager instances if CustomerSdkStatsManager in CustomerSdkStatsManager._instances: del CustomerSdkStatsManager._instances[CustomerSdkStatsManager] @@ -66,20 +66,20 @@ def test_manager_initialization_disabled(self): """Test manager initialization when customer SDK stats is disabled.""" # Disable customer SDK stats os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "false" - + # Create new manager with disabled state - if hasattr(CustomerSdkStatsManager, '_instances'): + if hasattr(CustomerSdkStatsManager, "_instances"): CustomerSdkStatsManager._instances = {} disabled_manager = CustomerSdkStatsManager() - + self.assertEqual(disabled_manager.status, CustomerSdkStatsStatus.DISABLED) self.assertFalse(disabled_manager.is_enabled) self.assertFalse(disabled_manager.is_initialized) self.assertFalse(disabled_manager.is_shutdown) - @patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter') - @patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader') - @patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') + @patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") + 
@patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader") + @patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider") def test_initialize_success(self, mock_meter_provider, mock_metric_reader, mock_exporter): """Test successful initialization of the manager.""" # Setup mocks @@ -87,24 +87,22 @@ def test_initialize_success(self, mock_meter_provider, mock_metric_reader, mock_ mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + mock_success_gauge = Mock() mock_dropped_gauge = Mock() mock_retry_gauge = Mock() - mock_meter.create_observable_gauge.side_effect = [ - mock_success_gauge, mock_dropped_gauge, mock_retry_gauge - ] - + mock_meter.create_observable_gauge.side_effect = [mock_success_gauge, mock_dropped_gauge, mock_retry_gauge] + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" - + # Test initialization result = self.manager.initialize(connection_string) - + self.assertTrue(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.ACTIVE) self.assertTrue(self.manager.is_initialized) self.assertFalse(self.manager.is_shutdown) - + # Verify mocks were called mock_exporter.assert_called_once() mock_metric_reader.assert_called_once() @@ -115,68 +113,70 @@ def test_initialize_disabled_manager(self): """Test that initialization fails when manager is disabled.""" # Disable customer SDK stats os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "false" - + # Create disabled manager - if hasattr(CustomerSdkStatsManager, '_instances'): + if hasattr(CustomerSdkStatsManager, "_instances"): CustomerSdkStatsManager._instances = {} disabled_manager = CustomerSdkStatsManager() - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" result = disabled_manager.initialize(connection_string) - + self.assertFalse(result) 
self.assertEqual(disabled_manager.status, CustomerSdkStatsStatus.DISABLED) def test_initialize_empty_connection_string(self): """Test that initialization fails with empty connection string.""" result = self.manager.initialize("") - + self.assertFalse(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.UNINITIALIZED) def test_initialize_multiple_calls(self): """Test that multiple initialization calls are handled correctly.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + # Setup mock meter mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" - + # First initialization result1 = self.manager.initialize(connection_string) self.assertTrue(result1) self.assertTrue(self.manager.is_initialized) - + # Second initialization should return True but not reinitialize result2 = self.manager.initialize(connection_string) self.assertTrue(result2) self.assertTrue(self.manager.is_initialized) - + # Verify exporter was only created once self.assertEqual(mock_meter_provider.call_count, 1) - @patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter') + 
@patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") def test_initialize_failure(self, mock_exporter): """Test initialization failure handling.""" # Make exporter creation fail mock_exporter.side_effect = Exception("Initialization failed") - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" result = self.manager.initialize(connection_string) - + self.assertFalse(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.UNINITIALIZED) def test_shutdown_uninitialized(self): """Test shutdown when manager is not initialized.""" result = self.manager.shutdown() - + self.assertFalse(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.UNINITIALIZED) @@ -184,86 +184,92 @@ def test_shutdown_already_shutdown(self): """Test shutdown when manager is already shut down.""" # Manually set status to shutdown self.manager._status = CustomerSdkStatsStatus.SHUTDOWN - + result = self.manager.shutdown() - + self.assertFalse(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.SHUTDOWN) def test_shutdown_success(self): """Test successful shutdown of initialized manager.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + # Setup mock meter mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter 
mock_meter_provider.return_value = mock_meter_provider_instance - + # Initialize first connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) self.assertTrue(self.manager.is_initialized) - + # Test shutdown result = self.manager.shutdown() - + self.assertTrue(result) self.assertEqual(self.manager.status, CustomerSdkStatsStatus.SHUTDOWN) self.assertTrue(self.manager.is_shutdown) - + # Verify meter provider shutdown was called mock_meter_provider_instance.shutdown.assert_called_once() - + # Verify singleton is not cleared on shutdown manager2 = CustomerSdkStatsManager() self.assertIs(self.manager, manager2) def test_shutdown_with_exception(self): """Test shutdown when meter provider shutdown throws exception.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + # Setup mock meter mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider_instance.shutdown.side_effect = Exception("Shutdown failed") mock_meter_provider.return_value = mock_meter_provider_instance - + # Initialize first connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Test shutdown - should still mark as shutdown even if exception occurs result = self.manager.shutdown() - 
+ self.assertFalse(result) # Returns False due to exception self.assertEqual(self.manager.status, CustomerSdkStatsStatus.SHUTDOWN) def test_count_successful_items(self): """Test counting successful items.""" # Initialize manager - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Count successful items self.manager.count_successful_items(5, _REQUEST) self.manager.count_successful_items(3, _CUSTOM_EVENT) self.manager.count_successful_items(2, _REQUEST) # Add to existing - + # Verify counters self.assertEqual(self.manager._counters.total_item_success_count[_REQUEST], 7) self.assertEqual(self.manager._counters.total_item_success_count[_CUSTOM_EVENT], 3) @@ -271,138 +277,153 @@ def test_count_successful_items(self): def test_count_successful_items_uninitialized(self): """Test that counting successful items does nothing when not initialized.""" self.manager.count_successful_items(5, _REQUEST) - + # Verify no counters were set self.assertEqual(len(self.manager._counters.total_item_success_count), 0) def test_count_successful_items_zero_count(self): 
"""Test that zero or negative counts are ignored.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Try to count zero and negative self.manager.count_successful_items(0, _REQUEST) self.manager.count_successful_items(-1, _REQUEST) - + # Verify no counters were set self.assertEqual(len(self.manager._counters.total_item_success_count), 0) def test_count_dropped_items(self): """Test counting dropped items.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as 
mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Count dropped items self.manager.count_dropped_items(3, _REQUEST, 404, True) self.manager.count_dropped_items(2, _REQUEST, DropCode.UNKNOWN, False) self.manager.count_dropped_items(1, _CUSTOM_EVENT, DropCode.CLIENT_EXCEPTION, True, "Custom error") - + # Verify counters structure self.assertIn(_REQUEST, self.manager._counters.total_item_drop_count) - self.assertEqual(self.manager._counters.total_item_drop_count[_REQUEST][404], {'Not found': {True: 3}}) - self.assertEqual(self.manager._counters.total_item_drop_count[_REQUEST][DropCode.UNKNOWN], {'Unknown reason': {False: 2}}) - self.assertEqual(self.manager._counters.total_item_drop_count[_CUSTOM_EVENT][DropCode.CLIENT_EXCEPTION], {'Custom error': {True: 1}}) + self.assertEqual(self.manager._counters.total_item_drop_count[_REQUEST][404], {"Not found": {True: 3}}) + self.assertEqual( + self.manager._counters.total_item_drop_count[_REQUEST][DropCode.UNKNOWN], {"Unknown reason": {False: 2}} + ) + self.assertEqual( + self.manager._counters.total_item_drop_count[_CUSTOM_EVENT][DropCode.CLIENT_EXCEPTION], + {"Custom error": {True: 1}}, + ) def test_count_dropped_items_none_success(self): """Test that dropped items with None success are ignored.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + 
"azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Try to count with None success self.manager.count_dropped_items(3, _REQUEST, DropCode.UNKNOWN, None) - + # Verify no counters were set self.assertEqual(len(self.manager._counters.total_item_drop_count), 0) def test_count_retry_items(self): """Test counting retry items.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Count retry items self.manager.count_retry_items(2, _REQUEST, RetryCode.CLIENT_TIMEOUT) self.manager.count_retry_items(1, _DEPENDENCY, 500, "Server error") self.manager.count_retry_items(3, 
_REQUEST, RetryCode.CLIENT_TIMEOUT) # Add to existing - + # Verify counters structure self.assertIn(_REQUEST, self.manager._counters.total_item_retry_count) self.assertIn(RetryCode.CLIENT_TIMEOUT, self.manager._counters.total_item_retry_count[_REQUEST]) def test_threading_safety(self): """Test that the manager is thread-safe.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Define a function to run in threads def count_items(): for i in range(100): self.manager.count_successful_items(1, _REQUEST) self.manager.count_dropped_items(1, _REQUEST, DropCode.UNKNOWN, True) self.manager.count_retry_items(1, _REQUEST, RetryCode.CLIENT_TIMEOUT) - + # Create and start multiple threads threads = [] for _ in range(5): thread = threading.Thread(target=count_items) threads.append(thread) thread.start() - + # Wait for all threads to complete for thread in threads: thread.join() - + # Verify that all counts were recorded (should be 500 total) self.assertEqual(self.manager._counters.total_item_success_count[_REQUEST], 500) def 
test_telemetry_counters_initialization(self): """Test that TelemetryCounters is properly initialized.""" counters = _CustomerSdkStatsTelemetryCounters() - + self.assertIsInstance(counters.total_item_success_count, dict) self.assertIsInstance(counters.total_item_drop_count, dict) self.assertIsInstance(counters.total_item_retry_count, dict) @@ -412,64 +433,68 @@ def test_telemetry_counters_initialization(self): def test_get_drop_reason(self): """Test _get_drop_reason method.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Test with status code reason = self.manager._get_drop_reason(400) self.assertEqual(reason, "Bad request") - + # Test with DropCode enum reason = self.manager._get_drop_reason(DropCode.CLIENT_READONLY) self.assertEqual(reason, "Client readonly") - + # Test with client exception and custom message reason = self.manager._get_drop_reason(DropCode.CLIENT_EXCEPTION, "Custom error") self.assertEqual(reason, "Custom error") - + # Test with client exception and no message reason = 
self.manager._get_drop_reason(DropCode.CLIENT_EXCEPTION) self.assertEqual(reason, "Client exception") def test_get_retry_reason(self): """Test _get_retry_reason method.""" - with patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader'), \ - patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider') as mock_meter_provider: - + with patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter"), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.PeriodicExportingMetricReader" + ), patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._manager.MeterProvider" + ) as mock_meter_provider: + mock_meter = Mock() mock_meter_provider_instance = Mock() mock_meter_provider_instance.get_meter.return_value = mock_meter mock_meter_provider.return_value = mock_meter_provider_instance - + connection_string = "InstrumentationKey=12345678-1234-5678-abcd-12345678abcd" self.manager.initialize(connection_string) - + # Test with status code reason = self.manager._get_retry_reason(500) self.assertEqual(reason, "Internal server error") - + # Test with RetryCode enum reason = self.manager._get_retry_reason(RetryCode.CLIENT_TIMEOUT) self.assertEqual(reason, "Client timeout") - + # Test with client exception and custom message reason = self.manager._get_retry_reason(RetryCode.CLIENT_EXCEPTION, "Network error") self.assertEqual(reason, "Network error") - + # Test with client exception and no message reason = self.manager._get_retry_reason(RetryCode.CLIENT_EXCEPTION) self.assertEqual(reason, "Client exception") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_utlities.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_utlities.py index 1a482d301b65..9d41184bbc8c 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_utlities.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/customer_sdk_stats/test_utlities.py @@ -9,7 +9,7 @@ from azure.core.exceptions import ServiceRequestTimeoutError, HttpResponseError from requests.exceptions import ConnectionError, ReadTimeout, Timeout -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( TelemetryItem, TelemetryEventData, RequestData, @@ -48,11 +48,11 @@ def setUp(self): """Set up test environment.""" # Enable customer SDK stats for testing os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "true" - + # Reset the customer stats manager for each test manager = get_customer_stats_manager() manager.shutdown() - + # Create sample telemetry items for testing self._create_test_envelopes() @@ -61,7 +61,7 @@ def tearDown(self): # Clean up environment variables os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW, None) os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL, None) - + # Shutdown customer stats manager = get_customer_stats_manager() manager.shutdown() @@ -69,14 +69,8 @@ def tearDown(self): def _create_test_envelopes(self): """Create test telemetry envelopes for various telemetry types.""" # Event envelope - event_data = TelemetryEventData( - name="test_event", - properties={"test_property": "test_value"} - ) - event_monitor_base = MonitorBase( - base_type="EventData", - base_data=event_data - ) + event_data = TelemetryEventData(name="test_event", properties={"test_property": "test_value"}) + event_monitor_base = MonitorBase(base_type="EventData", base_data=event_data) self.event_envelope = TelemetryItem( name="test_event_envelope", time=datetime.now(), @@ -92,12 +86,9 @@ def 
_create_test_envelopes(self): url="https://example.com/test", success=True, response_code="200", - duration="PT0.1S" - ) - request_monitor_base = MonitorBase( - base_type="RequestData", - base_data=request_data + duration="PT0.1S", ) + request_monitor_base = MonitorBase(base_type="RequestData", base_data=request_data) self.request_envelope_success = TelemetryItem( name="test_request_envelope", time=datetime.now(), @@ -113,12 +104,9 @@ def _create_test_envelopes(self): url="https://example.com/test", success=False, response_code="500", - duration="PT0.1S" - ) - failed_request_monitor_base = MonitorBase( - base_type="RequestData", - base_data=failed_request_data + duration="PT0.1S", ) + failed_request_monitor_base = MonitorBase(base_type="RequestData", base_data=failed_request_data) self.request_envelope_failed = TelemetryItem( name="test_failed_request_envelope", time=datetime.now(), @@ -134,12 +122,9 @@ def _create_test_envelopes(self): target="example.com", success=True, result_code="200", - duration="PT0.1S" - ) - dependency_monitor_base = MonitorBase( - base_type="RemoteDependencyData", - base_data=dependency_data + duration="PT0.1S", ) + dependency_monitor_base = MonitorBase(base_type="RemoteDependencyData", base_data=dependency_data) self.dependency_envelope_success = TelemetryItem( name="test_dependency_envelope", time=datetime.now(), @@ -155,12 +140,9 @@ def _create_test_envelopes(self): target="example.com", success=False, result_code="500", - duration="PT0.1S" - ) - failed_dependency_monitor_base = MonitorBase( - base_type="RemoteDependencyData", - base_data=failed_dependency_data + duration="PT0.1S", ) + failed_dependency_monitor_base = MonitorBase(base_type="RemoteDependencyData", base_data=failed_dependency_data) self.dependency_envelope_failed = TelemetryItem( name="test_failed_dependency_envelope", time=datetime.now(), @@ -173,27 +155,27 @@ def test_get_customer_sdkstats_export_interval_default(self): """Test getting default export interval when 
environment variable is not set.""" # Ensure environment variable is not set os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL, None) - + interval = get_customer_sdkstats_export_interval() - + self.assertEqual(interval, _DEFAULT_STATS_SHORT_EXPORT_INTERVAL) def test_get_customer_sdkstats_export_interval_custom(self): """Test getting custom export interval from environment variable.""" # Set custom interval os.environ[_APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL] = "30" - + interval = get_customer_sdkstats_export_interval() - + self.assertEqual(interval, 30) def test_get_customer_sdkstats_export_interval_invalid(self): """Test getting export interval with invalid environment variable value.""" # Set invalid interval os.environ[_APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL] = "invalid" - + interval = get_customer_sdkstats_export_interval() - + # Should return default when invalid self.assertEqual(interval, _DEFAULT_STATS_SHORT_EXPORT_INTERVAL) @@ -201,40 +183,40 @@ def test_get_customer_sdkstats_export_interval_empty(self): """Test getting export interval with empty environment variable.""" # Set empty interval os.environ[_APPLICATIONINSIGHTS_SDKSTATS_EXPORT_INTERVAL] = "" - + interval = get_customer_sdkstats_export_interval() - + # Should return default when empty self.assertEqual(interval, _DEFAULT_STATS_SHORT_EXPORT_INTERVAL) def test_is_customer_sdkstats_enabled_true(self): """Test checking if customer SDK stats is enabled (true case).""" os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "true" - + result = is_customer_sdkstats_enabled() - + self.assertTrue(result) def test_is_customer_sdkstats_enabled_false(self): """Test checking if customer SDK stats is enabled (false case).""" os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = "false" - + result = is_customer_sdkstats_enabled() - + self.assertFalse(result) def test_is_customer_sdkstats_enabled_not_set(self): """Test checking if customer SDK stats is enabled when not set.""" 
os.environ.pop(_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW, None) - + result = is_customer_sdkstats_enabled() - + self.assertFalse(result) def test_is_customer_sdkstats_enabled_case_insensitive(self): """Test that enabled check is case insensitive.""" test_cases = ["TRUE", "True", "tRuE", "true"] - + for case in test_cases: with self.subTest(case=case): os.environ[_APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW] = case @@ -257,7 +239,7 @@ def test_categorize_status_code_specific_codes(self): 503: "Service unavailable", 504: "Gateway timeout", } - + for status_code, expected_message in test_cases.items(): with self.subTest(status_code=status_code): result = categorize_status_code(status_code) @@ -266,7 +248,7 @@ def test_categorize_status_code_specific_codes(self): def test_categorize_status_code_generic_4xx(self): """Test categorization of generic 4xx status codes.""" test_codes = [405, 410, 418, 499] # Various 4xx codes not in specific map - + for status_code in test_codes: with self.subTest(status_code=status_code): result = categorize_status_code(status_code) @@ -275,7 +257,7 @@ def test_categorize_status_code_generic_4xx(self): def test_categorize_status_code_generic_5xx(self): """Test categorization of generic 5xx status codes.""" test_codes = [501, 505, 550, 599] # Various 5xx codes not in specific map - + for status_code in test_codes: with self.subTest(status_code=status_code): result = categorize_status_code(status_code) @@ -289,7 +271,7 @@ def test_categorize_status_code_other(self): 600: "status_600", 100: "status_100", } - + for status_code, expected_message in test_cases.items(): with self.subTest(status_code=status_code): result = categorize_status_code(status_code) @@ -298,14 +280,14 @@ def test_categorize_status_code_other(self): def test_determine_client_retry_code_http_errors(self): """Test determining retry code for HTTP errors with status codes.""" test_status_codes = [401, 403, 408, 429, 500, 502, 503, 504] - + for status_code in 
test_status_codes: with self.subTest(status_code=status_code): error = HttpResponseError("Test error") error.status_code = status_code - + retry_code, message = _determine_client_retry_code(error) - + self.assertEqual(retry_code, status_code) self.assertIsNotNone(message) @@ -314,9 +296,9 @@ def test_determine_client_retry_code_http_error_with_message(self): error = HttpResponseError("Custom error message") error.status_code = 500 error.message = "Custom error message" - + retry_code, message = _determine_client_retry_code(error) - + self.assertEqual(retry_code, 500) self.assertEqual(message, "Custom error message") @@ -328,29 +310,30 @@ def test_determine_client_retry_code_timeout_errors(self): TimeoutError("Generic timeout"), Timeout("Requests timeout"), ] - + for error in timeout_errors: with self.subTest(error_type=type(error).__name__): retry_code, message = _determine_client_retry_code(error) - + self.assertEqual(retry_code, RetryCode.CLIENT_TIMEOUT) self.assertEqual(message, _exception_categories.TIMEOUT_EXCEPTION.value) def test_determine_client_retry_code_timeout_in_message(self): """Test determining retry code for errors with timeout in message.""" + class CustomError(Exception): def __init__(self, message): self.message = message super().__init__(message) - + timeout_messages = ["Connection timeout occurred", "Request timed out"] - + for message in timeout_messages: with self.subTest(message=message): error = CustomError(message) - + retry_code, result_message = _determine_client_retry_code(error) - + self.assertEqual(retry_code, RetryCode.CLIENT_TIMEOUT) self.assertEqual(result_message, _exception_categories.TIMEOUT_EXCEPTION.value) @@ -360,51 +343,51 @@ def test_determine_client_retry_code_network_errors(self): ConnectionError("Connection failed"), OSError("OS error occurred"), ] - + for error in network_errors: with self.subTest(error_type=type(error).__name__): retry_code, message = _determine_client_retry_code(error) - + self.assertEqual(retry_code, 
RetryCode.CLIENT_EXCEPTION) self.assertEqual(message, _exception_categories.NETWORK_EXCEPTION.value) def test_determine_client_retry_code_generic_error(self): """Test determining retry code for generic errors.""" generic_error = ValueError("Generic error") - + retry_code, message = _determine_client_retry_code(generic_error) - + self.assertEqual(retry_code, RetryCode.CLIENT_EXCEPTION) self.assertEqual(message, _exception_categories.CLIENT_EXCEPTION.value) def test_get_telemetry_success_flag_request_success(self): """Test extracting success flag from successful request envelope.""" success_flag = _get_telemetry_success_flag(self.request_envelope_success) - + self.assertTrue(success_flag) def test_get_telemetry_success_flag_request_failed(self): """Test extracting success flag from failed request envelope.""" success_flag = _get_telemetry_success_flag(self.request_envelope_failed) - + self.assertFalse(success_flag) def test_get_telemetry_success_flag_dependency_success(self): """Test extracting success flag from successful dependency envelope.""" success_flag = _get_telemetry_success_flag(self.dependency_envelope_success) - + self.assertTrue(success_flag) def test_get_telemetry_success_flag_dependency_failed(self): """Test extracting success flag from failed dependency envelope.""" success_flag = _get_telemetry_success_flag(self.dependency_envelope_failed) - + self.assertFalse(success_flag) def test_get_telemetry_success_flag_event_envelope(self): """Test extracting success flag from event envelope (should return None).""" success_flag = _get_telemetry_success_flag(self.event_envelope) - + self.assertIsNone(success_flag) def test_get_telemetry_success_flag_no_data(self): @@ -416,17 +399,14 @@ def test_get_telemetry_success_flag_no_data(self): tags={"ai.internal.sdkVersion": "test_version"}, instrumentation_key="test_key", ) - + success_flag = _get_telemetry_success_flag(envelope) - + self.assertIsNone(success_flag) def 
test_get_telemetry_success_flag_no_base_type(self): """Test extracting success flag from envelope with no base_type.""" - monitor_base = MonitorBase( - base_type=None, - base_data=None - ) + monitor_base = MonitorBase(base_type=None, base_data=None) envelope = TelemetryItem( name="test_envelope", time=datetime.now(), @@ -434,137 +414,139 @@ def test_get_telemetry_success_flag_no_base_type(self): tags={"ai.internal.sdkVersion": "test_version"}, instrumentation_key="test_key", ) - + success_flag = _get_telemetry_success_flag(envelope) - + self.assertIsNone(success_flag) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager") def test_track_successful_items(self, mock_get_manager): """Test tracking successful items calls the manager correctly.""" mock_manager = mock.Mock() mock_get_manager.return_value = mock_manager - + envelopes = [self.request_envelope_success, self.event_envelope] - + track_successful_items(envelopes) - + # Verify manager was called for each envelope self.assertEqual(mock_manager.count_successful_items.call_count, 2) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager") def test_track_dropped_items_without_message(self, mock_get_manager): """Test tracking dropped items without error message.""" mock_manager = mock.Mock() mock_get_manager.return_value = mock_manager - + envelopes = [self.request_envelope_success, self.event_envelope] - + track_dropped_items(envelopes, 400) # Use HTTP status code 400 as DropCodeType - + # Verify manager was called for each envelope self.assertEqual(mock_manager.count_dropped_items.call_count, 2) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager') + 
@mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager") def test_track_dropped_items_with_message(self, mock_get_manager): """Test tracking dropped items with error message.""" mock_manager = mock.Mock() mock_get_manager.return_value = mock_manager - + envelopes = [self.request_envelope_success] error_message = "Custom error message" - + track_dropped_items(envelopes, DropCode.CLIENT_EXCEPTION, error_message) - + # Verify manager was called with error message mock_manager.count_dropped_items.assert_called_once() args = mock_manager.count_dropped_items.call_args - self.assertEqual(args[1]['exception_message'], error_message) + self.assertEqual(args[1]["exception_message"], error_message) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_customer_stats_manager") def test_track_retry_items(self, mock_get_manager): """Test tracking retry items calls the manager correctly.""" mock_manager = mock.Mock() mock_get_manager.return_value = mock_manager - + envelopes = [self.request_envelope_success, self.dependency_envelope_success] error = ConnectionError("Network error") - + track_retry_items(envelopes, error) - + # Verify manager was called for each envelope self.assertEqual(mock_manager.count_retry_items.call_count, 2) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_disabled(self, mock_track_dropped): """Test tracking dropped items from storage when storage is disabled.""" envelopes = [self.event_envelope] - + track_dropped_items_from_storage(StorageExportResult.CLIENT_STORAGE_DISABLED, envelopes) - + mock_track_dropped.assert_called_once_with(envelopes, DropCode.CLIENT_STORAGE_DISABLED) - 
@mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_readonly(self, mock_track_dropped): """Test tracking dropped items from storage when storage is readonly.""" envelopes = [self.event_envelope] - + track_dropped_items_from_storage(StorageExportResult.CLIENT_READONLY, envelopes) - + mock_track_dropped.assert_called_once_with(envelopes, DropCode.CLIENT_READONLY) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_capacity_reached(self, mock_track_dropped): """Test tracking dropped items from storage when capacity is reached.""" envelopes = [self.event_envelope] - + track_dropped_items_from_storage(StorageExportResult.CLIENT_PERSISTENCE_CAPACITY_REACHED, envelopes) - + mock_track_dropped.assert_called_once_with(envelopes, DropCode.CLIENT_PERSISTENCE_CAPACITY) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception') - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception" + ) + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_exception_state(self, mock_track_dropped, mock_get_exception): """Test tracking dropped items from storage when exception state is set.""" mock_get_exception.return_value = "Storage exception occurred" envelopes = [self.event_envelope] - + track_dropped_items_from_storage("some_result", envelopes) - + mock_track_dropped.assert_called_once_with( - envelopes, - 
DropCode.CLIENT_EXCEPTION, - _exception_categories.STORAGE_EXCEPTION.value + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value ) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception') - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception" + ) + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_string_result(self, mock_track_dropped, mock_get_exception): """Test tracking dropped items from storage when result is a string (exception).""" mock_get_exception.return_value = "" # No exception state envelopes = [self.event_envelope] - + track_dropped_items_from_storage("Exception string", envelopes) - + mock_track_dropped.assert_called_once_with( - envelopes, - DropCode.CLIENT_EXCEPTION, - _exception_categories.STORAGE_EXCEPTION.value + envelopes, DropCode.CLIENT_EXCEPTION, _exception_categories.STORAGE_EXCEPTION.value ) - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception') - @mock.patch('azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items') + @mock.patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.get_local_storage_setup_state_exception" + ) + @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer._utils.track_dropped_items") def test_track_dropped_items_from_storage_success(self, mock_track_dropped, mock_get_exception): """Test tracking dropped items from storage when operation is successful.""" mock_get_exception.return_value = "" # No exception state envelopes = [self.event_envelope] - + # Simulate successful storage operation 
track_dropped_items_from_storage(StorageExportResult.LOCAL_FILE_BLOB_SUCCESS, envelopes) - + # Should not call track_dropped_items for successful operations mock_track_dropped.assert_not_called() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py index 51946a92599e..d35b1a684e6f 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_logs.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +import datetime import json import os import platform @@ -33,11 +34,12 @@ _MICROSOFT_CUSTOM_EVENT_NAME, _DEFAULT_LOG_MESSAGE, ) -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys -from azure.monitor.opentelemetry.exporter._utils import ( - azure_monitor_context, - ns_to_iso_str, -) +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys +from azure.monitor.opentelemetry.exporter._utils import azure_monitor_context + + +def ns_to_datetime(ns: int) -> datetime.datetime: + return datetime.datetime.fromtimestamp(ns / 1e9, tz=datetime.timezone.utc) def throw(exc_type, *args, **kwargs): @@ -396,7 +398,7 @@ def test_log_to_envelope_log(self): envelope = exporter._log_to_envelope(self._log_data) record = self._log_data.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Message") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "MessageData") self.assertEqual(envelope.data.base_data.message, record.body) self.assertEqual(envelope.data.base_data.severity_level, 2) @@ -416,7 +418,7 @@ def test_log_to_envelope_log_empty(self): 
self.assertEqual(envelope.data.base_type, "MessageData") self.assertEqual(envelope.data.base_data.message, _DEFAULT_LOG_MESSAGE) self.assertEqual(envelope.tags.get(ContextTagKeys.AI_OPERATION_NAME), "TestOperationName") - + def test_log_to_envelope_log_empty_with_whitespaces(self): exporter = self._exporter envelope = exporter._log_to_envelope(self._log_data_empty_with_whitespaces) @@ -446,7 +448,7 @@ def test_log_to_envelope_exception_with_string_message(self): envelope = exporter._log_to_envelope(self._exc_data) record = self._log_data.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Exception") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "ExceptionData") self.assertEqual(envelope.data.base_data.severity_level, 4) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -464,7 +466,7 @@ def test_log_to_envelope_exception_with_exc_message(self): envelope = exporter._log_to_envelope(self._exc_data_with_exc_body) record = self._log_data.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Exception") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "ExceptionData") self.assertEqual(envelope.data.base_data.severity_level, 4) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -482,7 +484,7 @@ def test_log_to_envelope_exception_empty(self): envelope = exporter._log_to_envelope(self._exc_data_empty) record = self._log_data.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Exception") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "ExceptionData") 
self.assertEqual(envelope.data.base_data.severity_level, 4) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -497,7 +499,7 @@ def test_log_to_envelope_exception_with_blank_exception(self): envelope = exporter._log_to_envelope(self._exc_data_blank_exception) record = self._log_data.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Exception") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "ExceptionData") self.assertEqual(envelope.data.base_data.severity_level, 4) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -512,7 +514,7 @@ def test_log_to_envelope_event(self): envelope = exporter._log_to_envelope(self._log_data_event) record = self._log_data_event.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Event") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "EventData") self.assertEqual(envelope.data.base_data.name, record.body) self.assertEqual(envelope.data.base_data.properties["event_key"], "event_attribute") @@ -522,7 +524,7 @@ def test_log_to_envelope_event_complex_body(self): envelope = exporter._log_to_envelope(self._log_data_event_complex_body) record = self._log_data_event_complex_body.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Event") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "EventData") self.assertEqual(envelope.data.base_data.name, json.dumps(record.body)) self.assertEqual(envelope.data.base_data.properties["event_key"], "event_attribute") @@ -532,7 +534,7 @@ def test_log_to_envelope_event_complex_body_not_serializeable(self): 
envelope = exporter._log_to_envelope(self._log_data_event_complex_body_not_serializeable) record = self._log_data_event_complex_body_not_serializeable.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Event") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "EventData") self.assertEqual(envelope.data.base_data.name, str(record.body)) self.assertEqual(envelope.data.base_data.properties["event_key"], "event_attribute") @@ -543,7 +545,7 @@ def test_log_to_envelope_custom_event(self): record = self._log_data_custom_event.log_record self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Event") self.assertEqual(envelope.tags["ai.location.ip"], "192.168.1.1") - self.assertEqual(envelope.time, ns_to_iso_str(record.timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.timestamp)) self.assertEqual(envelope.data.base_type, "EventData") self.assertEqual(envelope.data.base_data.name, "event_name") self.assertEqual(envelope.data.base_data.properties["event_key"], "event_attribute") @@ -555,7 +557,7 @@ def test_log_to_envelope_timestamp(self): self._log_data.log_record.observed_timestamp = 1646865018558419457 envelope = exporter._log_to_envelope(self._log_data) record = self._log_data.log_record - self.assertEqual(envelope.time, ns_to_iso_str(record.observed_timestamp)) + self.assertEqual(envelope.time, ns_to_datetime(record.observed_timestamp)) self._log_data.log_record = old_record def test_log_to_envelope_synthetic_source(self): diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py index ed9dace6b025..4de11902d279 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/logs/test_processor.py @@ 
-30,17 +30,13 @@ def setUpClass(cls): def test_processor_initialization_without_trace_based_sampling(self): """Test processor initialization without trace-based sampling enabled.""" - processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={} - ) + processor = _AzureBatchLogRecordProcessor(self._exporter, options={}) self.assertFalse(processor._enable_trace_based_sampling_for_logs) def test_processor_initialization_with_trace_based_sampling(self): """Test processor initialization with trace-based sampling enabled.""" processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) self.assertTrue(processor._enable_trace_based_sampling_for_logs) @@ -51,11 +47,8 @@ def test_processor_initialization_without_options(self): def test_on_emit_with_trace_based_sampling_disabled(self): """Test on_emit does not filter logs when trace-based sampling is disabled.""" - processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={} - ) - + processor = _AzureBatchLogRecordProcessor(self._exporter, options={}) + mock_span_context = mock.Mock() mock_span_context.is_valid = True mock_span_context.trace_flags.sampled = False @@ -84,16 +77,15 @@ def test_on_emit_with_trace_based_sampling_disabled(self): ) # Mock the parent class's on_emit method through super - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as parent_on_emit_mock: processor.on_emit(log_record) # Parent on_emit should be called because trace-based sampling is disabled parent_on_emit_mock.assert_called_once() - def test_on_emit_with_trace_based_sampling_enabled_and_unsampled_trace(self): # cspell:disable-line - """Test on_emit filters logs when trace-based sampling is enabled and trace is unsampled.""" # cspell:disable-line + 
def test_on_emit_with_trace_based_sampling_enabled_and_unsampled_trace(self): # cspell:disable-line + """Test on_emit filters logs when trace-based sampling is enabled and trace is unsampled.""" # cspell:disable-line processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) mock_span_context = mock.Mock() @@ -123,9 +115,11 @@ def test_on_emit_with_trace_based_sampling_enabled_and_unsampled_trace(self): # InstrumentationScope("test_name"), ) # Mock get_current_span to return our mock span with proper get_span_context method - with mock.patch("azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span): + with mock.patch( + "azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span + ): # Mock only the parent class's on_emit method - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as parent_on_emit_mock: processor.on_emit(log_record) # Parent on_emit should NOT be called because trace is unsampled and filtering is enabled # cspell:disable-line parent_on_emit_mock.assert_not_called() @@ -133,8 +127,7 @@ def test_on_emit_with_trace_based_sampling_enabled_and_unsampled_trace(self): # def test_on_emit_with_trace_based_sampling_enabled_and_sampled_trace(self): """Test on_emit does not filter logs when trace-based sampling is enabled and trace is sampled.""" processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) mock_span_context = mock.Mock() @@ -143,7 +136,7 @@ def test_on_emit_with_trace_based_sampling_enabled_and_sampled_trace(self): mock_span = mock.Mock() 
mock_span.get_span_context.return_value = mock_span_context - + span_context = SpanContext( trace_id=125960616039069540489478540494783893221, span_id=2909973987304607650, @@ -164,8 +157,10 @@ def test_on_emit_with_trace_based_sampling_enabled_and_sampled_trace(self): InstrumentationScope("test_name"), ) - with mock.patch("azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span): - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch( + "azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span + ): + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as parent_on_emit_mock: processor.on_emit(log_record) # Parent on_emit should be called because trace is sampled parent_on_emit_mock.assert_called_once() @@ -173,8 +168,7 @@ def test_on_emit_with_trace_based_sampling_enabled_and_sampled_trace(self): def test_on_emit_with_trace_based_sampling_enabled_and_invalid_span_context(self): """Test on_emit does not filter logs with invalid span context.""" processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) mock_span_context = mock.Mock() @@ -203,8 +197,10 @@ def test_on_emit_with_trace_based_sampling_enabled_and_invalid_span_context(self InstrumentationScope("test_name"), ) - with mock.patch("azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span): - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch( + "azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span", return_value=mock_span + ): + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as parent_on_emit_mock: 
processor.on_emit(log_record) # Parent on_emit should be called because span context is invalid parent_on_emit_mock.assert_called_once() @@ -212,8 +208,7 @@ def test_on_emit_with_trace_based_sampling_enabled_and_invalid_span_context(self def test_on_emit_with_trace_based_sampling_enabled_and_no_context(self): """Test on_emit does not filter logs when there is no log record context.""" processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) log_record = _logs.ReadWriteLogRecord( @@ -227,7 +222,7 @@ def test_on_emit_with_trace_based_sampling_enabled_and_no_context(self): InstrumentationScope("test_name"), ) - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as parent_on_emit_mock: processor.on_emit(log_record) # Parent on_emit should be called because there's no context parent_on_emit_mock.assert_called_once() @@ -235,17 +230,16 @@ def test_on_emit_with_trace_based_sampling_enabled_and_no_context(self): def test_on_emit_integration_with_multiple_log_records(self): """Integration test: verify processor handles multiple log records correctly with trace-based sampling.""" processor = _AzureBatchLogRecordProcessor( - self._exporter, - options={"enable_trace_based_sampling_for_logs": True} + self._exporter, options={"enable_trace_based_sampling_for_logs": True} ) # Create unsampled span context # cspell:disable-line - mock_span_context_unsampled = mock.Mock() # cspell:disable-line - mock_span_context_unsampled.is_valid = True # cspell:disable-line - mock_span_context_unsampled.trace_flags.sampled = False # cspell:disable-line + mock_span_context_unsampled = mock.Mock() # cspell:disable-line + mock_span_context_unsampled.is_valid = True # cspell:disable-line + 
mock_span_context_unsampled.trace_flags.sampled = False # cspell:disable-line - mock_span_unsampled = mock.Mock() # cspell:disable-line - mock_span_unsampled.get_span_context.return_value = mock_span_context_unsampled # cspell:disable-line + mock_span_unsampled = mock.Mock() # cspell:disable-line + mock_span_unsampled.get_span_context.return_value = mock_span_context_unsampled # cspell:disable-line # Create sampled span context mock_span_context_sampled = mock.Mock() @@ -254,7 +248,7 @@ def test_on_emit_integration_with_multiple_log_records(self): mock_span_sampled = mock.Mock() mock_span_sampled.get_span_context.return_value = mock_span_context_sampled - + span_context = SpanContext( trace_id=125960616039069540489478540494783893221, span_id=2909973987304607650, @@ -264,17 +258,17 @@ def test_on_emit_integration_with_multiple_log_records(self): span = NonRecordingSpan(span_context) ctx = set_span_in_context(span) - log_record_unsampled = _logs.ReadWriteLogRecord( # cspell:disable-line + log_record_unsampled = _logs.ReadWriteLogRecord( # cspell:disable-line LogRecord( timestamp=1646865018558419456, context=ctx, severity_text="INFO", severity_number=SeverityNumber.INFO, - body="Unsampled log", # cspell:disable-line + body="Unsampled log", # cspell:disable-line ), InstrumentationScope("test_name"), ) - + span_context = SpanContext( trace_id=125960616039069540489478540494783893221, span_id=2909973987304607650, @@ -295,11 +289,13 @@ def test_on_emit_integration_with_multiple_log_records(self): InstrumentationScope("test_name"), ) - with mock.patch("azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span") as get_span_mock: - with mock.patch('opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit') as parent_on_emit_mock: + with mock.patch( + "azure.monitor.opentelemetry.exporter.export.logs._processor.get_current_span" + ) as get_span_mock: + with mock.patch("opentelemetry.sdk._logs.export.BatchLogRecordProcessor.on_emit") as 
parent_on_emit_mock: # Test unsampled log is filtered # cspell:disable-line - get_span_mock.return_value = mock_span_unsampled # cspell:disable-line - processor.on_emit(log_record_unsampled) # cspell:disable-line + get_span_mock.return_value = mock_span_unsampled # cspell:disable-line + processor.on_emit(log_record_unsampled) # cspell:disable-line parent_on_emit_mock.assert_not_called() # Reset mock diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/metrics/test_metrics.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/metrics/test_metrics.py index 31ffc81f1008..b66cb121b010 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/metrics/test_metrics.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/metrics/test_metrics.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +import datetime import os import platform import shutil @@ -26,11 +27,12 @@ AzureMonitorMetricExporter, _get_metric_export_result, ) -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys -from azure.monitor.opentelemetry.exporter._utils import ( - azure_monitor_context, - ns_to_iso_str, -) +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys +from azure.monitor.opentelemetry.exporter._utils import azure_monitor_context + + +def ns_to_datetime(ns: int) -> datetime.datetime: + return datetime.datetime.fromtimestamp(ns / 1e9, tz=datetime.timezone.utc) def throw(exc_type, *args, **kwargs): @@ -243,7 +245,7 @@ def test_point_to_envelope_number(self): envelope = exporter._point_to_envelope(point, "test name", resource, scope) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) 
self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -260,7 +262,7 @@ def test_point_to_envelope_histogram(self): envelope = exporter._point_to_envelope(point, "test name", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -269,12 +271,17 @@ def test_point_to_envelope_histogram(self): self.assertEqual(envelope.data.base_data.metrics[0].value, 31) self.assertEqual(envelope.data.base_data.metrics[0].count, 7) - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo, otlp, bar", - "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo, otlp, bar", + "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_point_to_envelope_aks_amw(self, attach_mock, aks_mock): exporter = self._exporter resource = Resource.create(attributes={"asd": "test_resource"}) @@ -284,12 +291,17 @@ def test_point_to_envelope_aks_amw(self, attach_mock, aks_mock): self.assertEqual(len(envelope.data.base_data.properties), 2) 
self.assertEqual(envelope.data.base_data.properties["_MS.SentToAMW"], "True") - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo, otlp, bar", - "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo, otlp, bar", + "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_point_to_envelope_statsbeat(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter( is_sdkstats=True, @@ -299,12 +311,17 @@ def test_point_to_envelope_statsbeat(self, attach_mock, aks_mock): self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertNotIn("_MS.SentToAMW", envelope.data.base_data.properties) - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", - "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", + "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=False) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_point_to_envelope_otlp_no_aks(self, attach_mock, aks_mock): exporter = self._exporter resource = Resource.create(attributes={"asd": "test_resource"}) @@ -313,12 +330,17 @@ def test_point_to_envelope_otlp_no_aks(self, attach_mock, 
aks_mock): self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertNotIn("_MS.SentToAMW", envelope.data.base_data.properties) - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", - "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", + "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=False) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=False + ) def test_point_to_envelope_otlp_aks_no_attach(self, attach_mock, aks_mock): exporter = self._exporter resource = Resource.create(attributes={"asd": "test_resource"}) @@ -327,12 +349,17 @@ def test_point_to_envelope_otlp_aks_no_attach(self, attach_mock, aks_mock): self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertNotIn("_MS.SentToAMW", envelope.data.base_data.properties) - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo, bar", - "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo, bar", + "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "TEST_ENDPOINT", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_point_to_envelope_aks_attach_no_otlp(self, attach_mock, aks_mock): exporter = self._exporter resource = 
Resource.create(attributes={"asd": "test_resource"}) @@ -342,11 +369,16 @@ def test_point_to_envelope_aks_attach_no_otlp(self, attach_mock, aks_mock): self.assertEqual(len(envelope.data.base_data.properties), 2) self.assertEqual(envelope.data.base_data.properties["_MS.SentToAMW"], "False") - @mock.patch.dict("os.environ", { - "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", - }) + @mock.patch.dict( + "os.environ", + { + "OTEL_METRICS_EXPORTER": " foo ,otlp ,bar", + }, + ) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_point_to_envelope_aks_attach_no_endpoint(self, attach_mock, aks_mock): exporter = self._exporter resource = Resource.create(attributes={"asd": "test_resource"}) @@ -369,7 +401,7 @@ def test_point_to_envelope_metric_namespace(self): envelope = exporter._point_to_envelope(point, "test name", resource, scope) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertEqual(envelope.data.base_data.properties["test"], "attribute") @@ -416,7 +448,7 @@ def test_point_to_envelope_std_metric_client_duration(self, target_mock): envelope = exporter._point_to_envelope(point, "http.client.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, 
ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "dependencies/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -455,7 +487,7 @@ def test_point_to_envelope_std_metric_client_duration(self, target_mock): envelope = exporter._point_to_envelope(point, "http.client.request.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "dependencies/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -491,7 +523,7 @@ def test_point_to_envelope_std_metric_server_duration(self): envelope = exporter._point_to_envelope(point, "http.server.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "requests/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -533,7 +565,7 @@ def test_point_to_envelope_std_metric_server_duration(self): envelope = exporter._point_to_envelope(point, "http.server.request.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) 
self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "requests/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -591,7 +623,7 @@ def test_point_to_envelope_std_metric_client_duration_log_analytics_disabled(sel envelope = exporter._point_to_envelope(point, "http.client.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "dependencies/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -629,7 +661,7 @@ def test_point_to_envelope_std_metric_client_duration_log_analytics_disabled(sel envelope = exporter._point_to_envelope(point, "http.client.request.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "dependencies/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -664,7 +696,7 @@ def test_point_to_envelope_std_metric_server_duration_log_analytics_disabled(sel envelope = 
exporter._point_to_envelope(point, "http.server.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "requests/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -705,7 +737,7 @@ def test_point_to_envelope_std_metric_server_duration_log_analytics_disabled(sel envelope = exporter._point_to_envelope(point, "http.server.request.duration", resource) self.assertEqual(envelope.instrumentation_key, exporter._instrumentation_key) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.Metric") - self.assertEqual(envelope.time, ns_to_iso_str(point.time_unix_nano)) + self.assertEqual(envelope.time, ns_to_datetime(point.time_unix_nano)) self.assertEqual(envelope.data.base_type, "MetricData") self.assertEqual(envelope.data.base_data.properties["_MS.MetricId"], "requests/duration") self.assertEqual(envelope.data.base_data.properties["_MS.IsAutocollected"], "True") @@ -763,80 +795,84 @@ def test_point_to_envelope_std_metric_log_analytics_disabled(self): self.assertIsNone(envelope) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_enabled_default_aks(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() self.assertTrue(exporter._metrics_to_log_analytics) 
@mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=False) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_enabled_default_off_aks(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " TRUE" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " TRUE"}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_enabled_env_var(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false " - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false "}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=False) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_disabled_env_var_off_aks(self, attach_mock, aks_mock): exporter = 
AzureMonitorMetricExporter() # APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED is currently only specified for AKS Attach self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false " - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false "}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=False) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=False + ) def test_constructor_log_analytics_disabled_env_var_manual_aks(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() # APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED is currently only specified for AKS Attach self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false " - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false "}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_disabled_env_var(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() self.assertFalse(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false " - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": " false "}) 
@mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_statsbeat(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter( is_sdkstats=True, ) self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": "falser" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": "falser"}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_invalid_env_var(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() self.assertTrue(exporter._metrics_to_log_analytics) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": "" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_METRICS_TO_LOGANALYTICS_ENABLED": ""}) @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_on_aks", return_value=True) - @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True) + @mock.patch( + "azure.monitor.opentelemetry.exporter.export.metrics._exporter._utils._is_attach_enabled", return_value=True + ) def test_constructor_log_analytics_blank_env_var(self, attach_mock, aks_mock): exporter = AzureMonitorMetricExporter() 
self.assertTrue(exporter._metrics_to_log_analytics) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_constants.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_constants.py index 7f9a16fb37ae..f53dd2d85be9 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_constants.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_constants.py @@ -15,9 +15,7 @@ _PROCESSOR_TIME, _PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS, ) -from azure.monitor.opentelemetry.exporter._quickpulse._constants import ( - _QUICKPULSE_METRIC_NAME_MAPPINGS -) +from azure.monitor.opentelemetry.exporter._quickpulse._constants import _QUICKPULSE_METRIC_NAME_MAPPINGS class TestPerformanceCounterConstants(unittest.TestCase): @@ -42,7 +40,9 @@ def test_request_execution_time_constant(self): self.assertIsInstance(_REQUEST_EXECUTION_TIME, tuple) self.assertEqual(len(_REQUEST_EXECUTION_TIME), 2) self.assertEqual(_REQUEST_EXECUTION_TIME[0], "azuremonitor.performancecounter.requestexecutiontime") - self.assertEqual(_REQUEST_EXECUTION_TIME[1], "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Request Execution Time") + self.assertEqual( + _REQUEST_EXECUTION_TIME[1], "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Request Execution Time" + ) def test_request_rate_constant(self): """Test request rate constant values.""" @@ -99,10 +99,10 @@ def test_performance_counter_metric_name_mappings(self): _PROCESS_PRIVATE_BYTES, _PROCESSOR_TIME, ] - + self.assertIsInstance(_PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS, dict) self.assertEqual(len(_PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS), len(expected_mappings)) - + for metric_tuple in expected_mappings: otel_name, quickpulse_name = metric_tuple self.assertIn(otel_name, _PERFORMANCE_COUNTER_METRIC_NAME_MAPPINGS) @@ -121,7 +121,7 @@ def test_all_constants_are_tuples_with_two_elements(self): _PROCESS_PRIVATE_BYTES, _PROCESSOR_TIME, 
] - + for constant in constants: with self.subTest(constant=constant): self.assertIsInstance(constant, tuple) @@ -142,12 +142,11 @@ def test_opentelemetry_metric_names_are_unique(self): _PROCESS_PRIVATE_BYTES, _PROCESSOR_TIME, ] - + otel_names = [constant[0] for constant in constants] unique_names = set(otel_names) - - self.assertEqual(len(otel_names), len(unique_names), - "Duplicate OpenTelemetry metric names found") + + self.assertEqual(len(otel_names), len(unique_names), "Duplicate OpenTelemetry metric names found") def test_quickpulse_perf_counters_unique_otel(self): """Test that all Quickpulse and Performance Counters metric names are unique.""" @@ -168,17 +167,18 @@ def test_opentelemetry_metric_names_follow_convention(self): _PROCESS_PRIVATE_BYTES, _PROCESSOR_TIME, ] - + expected_prefix = "azuremonitor.performancecounter." - + for constant in constants: otel_name = constant[0] with self.subTest(metric_name=otel_name): - self.assertTrue(otel_name.startswith(expected_prefix), - f"Metric name '{otel_name}' does not start with '{expected_prefix}'") + self.assertTrue( + otel_name.startswith(expected_prefix), + f"Metric name '{otel_name}' does not start with '{expected_prefix}'", + ) # Check that it doesn't end with the prefix (i.e., has additional content) - self.assertGreater(len(otel_name), len(expected_prefix), - f"Metric name '{otel_name}' is too short") + self.assertGreater(len(otel_name), len(expected_prefix), f"Metric name '{otel_name}' is too short") def test_quickpulse_metric_names_follow_convention(self): """Test that all Quickpulse metric names follow the expected Windows performance counter convention.""" @@ -193,14 +193,16 @@ def test_quickpulse_metric_names_follow_convention(self): _PROCESS_PRIVATE_BYTES, _PROCESSOR_TIME, ] - + for constant in constants: quickpulse_name = constant[1] with self.subTest(metric_name=quickpulse_name): # All should start with backslash (Windows performance counter format) - 
self.assertTrue(quickpulse_name.startswith("\\"), - f"Quickpulse name '{quickpulse_name}' does not start with '\\'") + self.assertTrue( + quickpulse_name.startswith("\\"), f"Quickpulse name '{quickpulse_name}' does not start with '\\'" + ) # Should contain at least one more backslash (category\\counter format) backslash_count = quickpulse_name.count("\\") - self.assertGreaterEqual(backslash_count, 2, - f"Quickpulse name '{quickpulse_name}' doesn't follow \\Category\\Counter format") + self.assertGreaterEqual( + backslash_count, 2, f"Quickpulse name '{quickpulse_name}' doesn't follow \\Category\\Counter format" + ) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py index 0c15af74cf54..9fdbb1e36d62 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_performance_counters.py @@ -51,6 +51,7 @@ def setUp(self): """Reset global state before each test.""" # Import the module to reset globals import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + # TODO: _PROCESS.io_counters() is not available on Mac OS and some Linux distros. Find alternative. 
manager_module._IO_AVAILABLE = True manager_module._IO_LAST_COUNT = 0 @@ -64,9 +65,9 @@ def setUp(self): def test_get_process_cpu_success(self, mock_process): """Test successful process CPU retrieval.""" mock_process.cpu_percent.return_value = 25.5 - + result = list(_get_process_cpu(None)) - + self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].value, 25.5) mock_process.cpu_percent.assert_called_once_with(interval=None) @@ -75,9 +76,9 @@ def test_get_process_cpu_success(self, mock_process): def test_get_process_cpu_error(self, mock_process): """Test process CPU retrieval with error.""" mock_process.cpu_percent.side_effect = psutil.NoSuchProcess(1) - + result = list(_get_process_cpu(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 0.0) @@ -86,9 +87,9 @@ def test_get_process_cpu_error(self, mock_process): def test_get_process_cpu_normalized_success(self, mock_process): """Test successful normalized process CPU retrieval.""" mock_process.cpu_percent.return_value = 80.0 - + result = list(_get_process_cpu_normalized(None)) - + self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].value, 20.0) # 80 / 4 CPUs mock_process.cpu_percent.assert_called_once_with(interval=None) @@ -97,7 +98,7 @@ def test_get_process_cpu_normalized_success(self, mock_process): def test_get_process_cpu_normalized_no_cpus(self): """Test normalized process CPU with no CPUs.""" result = list(_get_process_cpu_normalized(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 0.0) @@ -107,9 +108,9 @@ def test_get_available_memory_success(self, mock_virtual_memory): mock_memory = MagicMock() mock_memory.available = 1073741824 # 1GB mock_virtual_memory.return_value = mock_memory - + result = list(_get_available_memory(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 1073741824) @@ -117,9 +118,9 @@ def test_get_available_memory_success(self, mock_virtual_memory): def 
test_get_available_memory_error(self, mock_virtual_memory): """Test available memory retrieval with error.""" mock_virtual_memory.side_effect = Exception("Memory error") - + result = list(_get_available_memory(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 0) @@ -129,9 +130,9 @@ def test_get_process_memory_success(self, mock_process): mock_memory_info = MagicMock() mock_memory_info.rss = 52428800 # 50MB mock_process.memory_info.return_value = mock_memory_info - + result = list(_get_process_memory(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 52428800) @@ -139,9 +140,9 @@ def test_get_process_memory_success(self, mock_process): def test_get_process_memory_error(self, mock_process): """Test process memory retrieval with error.""" mock_process.memory_info.side_effect = psutil.AccessDenied(1) - + result = list(_get_process_memory(None)) - + self.assertEqual(len(result), 1) self.assertEqual(result[0].value, 0) @@ -153,22 +154,23 @@ def test_get_process_io_success(self, mock_process, mock_datetime): mock_io_counters = MagicMock() mock_io_counters.read_bytes = 2000 mock_io_counters.write_bytes = 3000 - + mock_process.io_counters.return_value = mock_io_counters - + # Setup time mocks start_time = datetime(2023, 1, 1, 12, 0, 0) end_time = datetime(2023, 1, 1, 12, 0, 2) # 2 seconds later - + mock_datetime.now.return_value = end_time - + # Import and modify global variables import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + manager_module._IO_LAST_COUNT = 3000 # Previous total manager_module._IO_LAST_TIME = start_time - + result = list(_get_process_io(None)) - + self.assertEqual(len(result), 1) # Expected: (5000 - 3000) / 2 seconds = 1000 bytes/sec self.assertAlmostEqual(result[0].value, 1000.0) @@ -179,21 +181,22 @@ def test_get_process_io_unavailable(self, mock_process, mock_datetime): """Test unavailable process I/O retrieval.""" # Setup unavailable I/O counters 
mock_process.io_counters.side_effect = AttributeError("'Process' object has no attribute 'io_counters'") - + # Setup time mocks start_time = datetime(2023, 1, 1, 12, 0, 0) end_time = datetime(2023, 1, 1, 12, 0, 2) # 2 seconds later - + mock_datetime.now.return_value = end_time - + # Import and modify global variables import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + manager_module._IO_AVAILABLE = 0 # Previous total manager_module._IO_LAST_COUNT = 0 # Previous total manager_module._IO_LAST_TIME = start_time - + result = list(_get_process_io(None)) - + self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].value, 0.0) @@ -202,20 +205,21 @@ def test_get_processor_time_success(self, mock_cpu_times): """Test successful processor time retrieval.""" # Create mock CPU times CpuTimes = collections.namedtuple("CpuTimes", ["user", "system", "idle", "nice"]) - + # First call (stored in _LAST_CPU_TIMES) first_times = CpuTimes(user=10.0, system=5.0, idle=80.0, nice=1.0) # Second call (current) second_times = CpuTimes(user=15.0, system=7.0, idle=85.0, nice=1.5) - + mock_cpu_times.return_value = second_times - + # Import and set up global state import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + manager_module._LAST_CPU_TIMES = first_times - + result = list(_get_processor_time(None)) - + self.assertEqual(len(result), 1) # Calculate expected utilization # Total delta: (15+7+85+1.5) - (10+5+80+1) = 108.5 - 96 = 12.5 @@ -225,17 +229,11 @@ def test_get_processor_time_success(self, mock_cpu_times): def test_get_cpu_times_total(self): """Test CPU times total calculation.""" - CpuTimes = collections.namedtuple( - "CpuTimes", - ["user", "system", "idle", "nice", "iowait", "irq", "softirq"] - ) - cpu_times = CpuTimes( - user=10.0, system=5.0, idle=80.0, nice=1.0, - iowait=2.0, irq=0.5, softirq=0.8 - ) - + CpuTimes = collections.namedtuple("CpuTimes", ["user", "system", "idle", "nice", 
"iowait", "irq", "softirq"]) + cpu_times = CpuTimes(user=10.0, system=5.0, idle=80.0, nice=1.0, iowait=2.0, irq=0.5, softirq=0.8) + total = _get_cpu_times_total(cpu_times) - + expected = 10.0 + 5.0 + 80.0 + 1.0 + 2.0 + 0.5 + 0.8 self.assertAlmostEqual(total, expected) @@ -244,20 +242,21 @@ def test_get_request_rate_success(self, mock_datetime): """Test successful request rate calculation.""" start_time = datetime(2023, 1, 1, 12, 0, 0) end_time = datetime(2023, 1, 1, 12, 0, 5) # 5 seconds later - + mock_datetime.now.side_effect = [end_time, end_time] - + # Import and set up global state import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + manager_module._REQUESTS_COUNT = 10 manager_module._LAST_REQUEST_RATE_TIME = start_time - + result = list(_get_request_rate(None)) - + self.assertEqual(len(result), 1) # Expected: 10 requests / 5 seconds = 2 req/sec self.assertAlmostEqual(result[0].value, 2.0) - + # Check that globals were reset self.assertEqual(manager_module._REQUESTS_COUNT, 0) @@ -266,20 +265,21 @@ def test_get_exception_rate_success(self, mock_datetime): """Test successful exception rate calculation.""" start_time = datetime(2023, 1, 1, 12, 0, 0) end_time = datetime(2023, 1, 1, 12, 0, 10) # 10 seconds later - + mock_datetime.now.side_effect = [end_time, end_time] - + # Import and set up global state import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + manager_module._EXCEPTIONS_COUNT = 5 manager_module._LAST_EXCEPTION_RATE_TIME = start_time - + result = list(_get_exception_rate(None)) - + self.assertEqual(len(result), 1) # Expected: 5 exceptions / 10 seconds = 0.5 exc/sec self.assertAlmostEqual(result[0].value, 0.5) - + # Check that globals were reset self.assertEqual(manager_module._EXCEPTIONS_COUNT, 0) @@ -295,7 +295,7 @@ def setUp(self): def test_available_memory_initialization(self): """Test AvailableMemory class initialization.""" counter = AvailableMemory(self.meter) - + 
self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.memoryavailablebytes") self.assertEqual(counter.NAME[1], "\\Memory\\Available Bytes") @@ -303,7 +303,7 @@ def test_available_memory_initialization(self): def test_exception_rate_initialization(self): """Test ExceptionRate class initialization.""" counter = ExceptionRate(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.exceptionssec") self.assertEqual(counter.NAME[1], "\\.NET CLR Exceptions(??APP_CLR_PROC??)\\# of Exceps Thrown / sec") @@ -311,7 +311,7 @@ def test_exception_rate_initialization(self): def test_request_execution_time_initialization(self): """Test RequestExecutionTime class initialization.""" counter = RequestExecutionTime(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.requestexecutiontime") self.assertEqual(counter.NAME[1], "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Request Execution Time") @@ -319,7 +319,7 @@ def test_request_execution_time_initialization(self): def test_request_rate_initialization(self): """Test RequestRate class initialization.""" counter = RequestRate(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.requestssec") self.assertEqual(counter.NAME[1], "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec") @@ -327,7 +327,7 @@ def test_request_rate_initialization(self): def test_process_cpu_initialization(self): """Test ProcessCpu class initialization.""" counter = ProcessCpu(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.processtime") self.assertEqual(counter.NAME[1], "\\Process(??APP_WIN32_PROC??)\\% Processor Time") @@ -335,7 +335,7 @@ def test_process_cpu_initialization(self): def test_process_cpu_normalized_initialization(self): """Test 
ProcessCpuNormalized class initialization.""" counter = ProcessCpuNormalized(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.processtimenormalized") self.assertEqual(counter.NAME[1], "\\Process(??APP_WIN32_PROC??)\\% Processor Time Normalized") @@ -343,7 +343,7 @@ def test_process_cpu_normalized_initialization(self): def test_process_io_rate_initialization(self): """Test ProcessIORate class initialization.""" counter = ProcessIORate(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.processiobytessec") self.assertEqual(counter.NAME[1], "\\Process(??APP_WIN32_PROC??)\\IO Data Bytes/sec") @@ -351,7 +351,7 @@ def test_process_io_rate_initialization(self): def test_process_private_bytes_initialization(self): """Test ProcessPrivateBytes class initialization.""" counter = ProcessPrivateBytes(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.processprivatebytes") self.assertEqual(counter.NAME[1], "\\Process(??APP_WIN32_PROC??)\\Private Bytes") @@ -359,7 +359,7 @@ def test_process_private_bytes_initialization(self): def test_processor_time_initialization(self): """Test ProcessorTime class initialization.""" counter = ProcessorTime(self.meter) - + self.assertIsNotNone(counter.gauge) self.assertEqual(counter.NAME[0], "azuremonitor.performancecounter.processortotalprocessortime") self.assertEqual(counter.NAME[1], "\\Processor(_Total)\\% Processor Time") @@ -388,13 +388,13 @@ def test_manager_initialization_success(self, mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + # Mock create_observable_gauge and create_histogram to return mock objects mock_meter.create_observable_gauge.return_value = MagicMock() mock_meter.create_histogram.return_value = 
MagicMock() - + manager = _PerformanceCountersManager() - + self.assertEqual(len(manager._performance_counters), len(PERFORMANCE_COUNTER_METRICS)) mock_meter_provider.get_meter.assert_called_once() @@ -406,13 +406,13 @@ def test_manager_initialization_success_no_io(self, mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + # Mock create_observable_gauge and create_histogram to return mock objects mock_meter.create_observable_gauge.return_value = MagicMock() mock_meter.create_histogram.return_value = MagicMock() - + manager = _PerformanceCountersManager() - + # TODO: _PROCESS.io_counters() is not available on Mac OS and some Linux distros. Find alternative. self.assertEqual(len(manager._performance_counters), len(PERFORMANCE_COUNTER_METRICS) - 1) mock_meter_provider.get_meter.assert_called_once() @@ -422,22 +422,22 @@ def test_manager_initialization_with_custom_meter_provider(self): mock_meter_provider = MagicMock() mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter - + # Mock create_observable_gauge and create_histogram to return mock objects mock_meter.create_observable_gauge.return_value = MagicMock() mock_meter.create_histogram.return_value = MagicMock() - + manager = _PerformanceCountersManager(meter_provider=mock_meter_provider) - + mock_meter_provider.get_meter.assert_called_once() @mock.patch("azure.monitor.opentelemetry.exporter._performance_counters._manager.metrics.get_meter_provider") def test_manager_initialization_failure(self, mock_get_meter_provider): """Test manager initialization failure.""" mock_get_meter_provider.side_effect = Exception("Meter provider error") - + manager = _PerformanceCountersManager() - + # Manager should handle the exception gracefully self.assertIsNotNone(manager) @@ -445,7 +445,7 @@ def test_manager_singleton_behavior(self): """Test that manager follows singleton pattern.""" 
manager1 = _PerformanceCountersManager() manager2 = _PerformanceCountersManager() - + self.assertIs(manager1, manager2) @mock.patch("azure.monitor.opentelemetry.exporter._performance_counters._manager.metrics.get_meter_provider") @@ -455,23 +455,24 @@ def test_record_span_consumer(self, mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + manager = _PerformanceCountersManager() manager._request_duration_histogram = MagicMock() - + # Create a mock span mock_span = MagicMock(spec=ReadableSpan) mock_span.kind = SpanKind.CONSUMER mock_span.start_time = 1000000000 # 1 second in nanoseconds - mock_span.end_time = 2000000000 # 2 seconds in nanoseconds + mock_span.end_time = 2000000000 # 2 seconds in nanoseconds mock_span.events = [] - + # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_count = manager_module._REQUESTS_COUNT - + manager._record_span(mock_span) - + # Check that request was counted and duration recorded self.assertEqual(manager_module._REQUESTS_COUNT, initial_count + 1) manager._request_duration_histogram.record.assert_called_once_with(1.0) # 1 second duration @@ -483,23 +484,24 @@ def test_record_span_request(self, mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + manager = _PerformanceCountersManager() manager._request_duration_histogram = MagicMock() - + # Create a mock span mock_span = MagicMock(spec=ReadableSpan) mock_span.kind = SpanKind.SERVER mock_span.start_time = 1000000000 # 1 second in nanoseconds - mock_span.end_time = 2000000000 # 2 seconds in nanoseconds + mock_span.end_time = 2000000000 # 2 seconds in nanoseconds mock_span.events = [] - + # Import to access global counter import 
azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_count = manager_module._REQUESTS_COUNT - + manager._record_span(mock_span) - + # Check that request was counted and duration recorded self.assertEqual(manager_module._REQUESTS_COUNT, initial_count + 1) manager._request_duration_histogram.record.assert_called_once_with(1.0) # 1 second duration @@ -511,43 +513,45 @@ def test_record_span_with_exception(self, mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + manager = _PerformanceCountersManager() manager._request_duration_histogram = MagicMock() - + # Create a mock span with exception event mock_event = MagicMock() mock_event.name = "exception" - + mock_span = MagicMock(spec=ReadableSpan) mock_span.kind = SpanKind.SERVER mock_span.start_time = 1000000000 mock_span.end_time = 2000000000 mock_span.events = [mock_event] - + # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_exceptions = manager_module._EXCEPTIONS_COUNT - + manager._record_span(mock_span) - + # Check that exception was counted self.assertEqual(manager_module._EXCEPTIONS_COUNT, initial_exceptions + 1) def test_record_span_non_server_consumer_kind(self): """Test recording span that's not a server/consumer kind.""" manager = _PerformanceCountersManager() - + # Create a mock span with CLIENT kind mock_span = MagicMock(spec=ReadableSpan) mock_span.kind = SpanKind.CLIENT - + # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_count = manager_module._REQUESTS_COUNT - + manager._record_span(mock_span) - + # Request count should not change self.assertEqual(manager_module._REQUESTS_COUNT, initial_count) @@ -558,32 +562,30 @@ def test_record_log_record_with_exception(self, 
mock_get_meter_provider): mock_meter = MagicMock() mock_meter_provider.get_meter.return_value = mock_meter mock_get_meter_provider.return_value = mock_meter_provider - + manager = _PerformanceCountersManager() - + # Create a mock log data with exception attributes mock_log_record = MagicMock() - mock_log_record.attributes = { - EXCEPTION_TYPE: "ValueError", - EXCEPTION_MESSAGE: "Test exception" - } - + mock_log_record.attributes = {EXCEPTION_TYPE: "ValueError", EXCEPTION_MESSAGE: "Test exception"} + mock_readable_log_record = MagicMock(spec=ReadableLogRecord) mock_readable_log_record.log_record = mock_log_record # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_exceptions = manager_module._EXCEPTIONS_COUNT manager._record_log_record(mock_readable_log_record) - + # Check that exception was counted self.assertEqual(manager_module._EXCEPTIONS_COUNT, initial_exceptions + 1) def test_record_log_record_without_exception(self): """Test recording log record without exception attributes.""" manager = _PerformanceCountersManager() - + # Create a mock log data without exception attributes mock_log_record = MagicMock() mock_log_record.attributes = {"normal": "attribute"} @@ -593,10 +595,11 @@ def test_record_log_record_without_exception(self): # Import to access global counter import azure.monitor.opentelemetry.exporter._performance_counters._manager as manager_module + initial_exceptions = manager_module._EXCEPTIONS_COUNT manager._record_log_record(mock_readable_log_record) - + # Exception count should not change self.assertEqual(manager_module._EXCEPTIONS_COUNT, initial_exceptions) @@ -618,10 +621,13 @@ def test_enable_performance_counters_default_provider(self): """Test enabling performance counters with default provider.""" # Create a proper meter provider for testing meter_provider = MeterProvider() - - with 
mock.patch("azure.monitor.opentelemetry.exporter._performance_counters._manager.metrics.get_meter_provider", return_value=meter_provider): + + with mock.patch( + "azure.monitor.opentelemetry.exporter._performance_counters._manager.metrics.get_meter_provider", + return_value=meter_provider, + ): enable_performance_counters() - + # Should create a manager instance # self.assertIsNotNone(_PerformanceCountersManager._instance) self.assertIn(_PerformanceCountersManager, Singleton._instances) @@ -630,9 +636,9 @@ def test_enable_performance_counters_custom_provider(self): """Test enabling performance counters with custom provider.""" # Create a proper meter provider for testing meter_provider = MeterProvider() - + enable_performance_counters(meter_provider=meter_provider) - + # Should create a manager instance # self.assertIsNotNone(_PerformanceCountersManager._instance) self.assertIn(_PerformanceCountersManager, Singleton._instances) @@ -654,9 +660,9 @@ def test_performance_counter_metrics_list(self): ProcessPrivateBytes, ProcessorTime, ] - + self.assertEqual(len(PERFORMANCE_COUNTER_METRICS), len(expected_classes)) - + for expected_class in expected_classes: self.assertIn(expected_class, PERFORMANCE_COUNTER_METRICS) @@ -681,7 +687,7 @@ def setUp(self): # Reset singleton if _PerformanceCountersManager in Singleton._instances: del Singleton._instances[_PerformanceCountersManager] - + # Create a metrics setup that allows us to read metrics self.reader = InMemoryMetricReader() self.meter_provider = MeterProvider(metric_readers=[self.reader]) @@ -706,7 +712,7 @@ def _get_metric_value(self, metrics_data, metric_name): for scope_metrics in resource_metrics.scope_metrics: for metric in scope_metrics.metrics: if metric.name == metric_name: - if hasattr(metric.data, 'data_points') and metric.data.data_points: + if hasattr(metric.data, "data_points") and metric.data.data_points: return metric.data.data_points[0].value return None @@ -715,17 +721,17 @@ def 
test_available_memory_metric_generation(self, mock_virtual_memory): """Test that available memory metrics are generated correctly.""" # Mock available memory mock_virtual_memory.return_value.available = 2147483648 # 2GB - + # Initialize performance counters - use real meter provider, not mocked manager = _PerformanceCountersManager(meter_provider=self.meter_provider) - + # Force metrics collection metrics_data = self.reader.get_metrics_data() - + # Verify that available memory metric was created metric_names = self._get_metric_names(metrics_data) self.assertIn("azuremonitor.performancecounter.memoryavailablebytes", metric_names) - + # Verify the metric value memory_value = self._get_metric_value(metrics_data, "azuremonitor.performancecounter.memoryavailablebytes") self.assertEqual(memory_value, 2147483648) # Should match our mocked value @@ -737,18 +743,17 @@ def test_process_memory_metric_generation(self, mock_process): mock_memory_info = MagicMock() mock_memory_info.rss = 104857600 # 100MB mock_process.memory_info.return_value = mock_memory_info - + # Initialize performance counters - use real meter provider, not mocked manager = _PerformanceCountersManager(meter_provider=self.meter_provider) - + # Force metrics collection metrics_data = self.reader.get_metrics_data() - + # Verify that process memory metric was created metric_names = self._get_metric_names(metrics_data) self.assertIn("azuremonitor.performancecounter.processprivatebytes", metric_names) - + # Verify the metric value memory_value = self._get_metric_value(metrics_data, "azuremonitor.performancecounter.processprivatebytes") self.assertEqual(memory_value, 104857600) # Should match our mocked value - diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py index 132c06b6763d..1ce7492b20b6 100644 --- 
a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/performance_counters/test_processor.py @@ -38,9 +38,9 @@ def test_on_emit_with_manager(self, mock_manager_class): # Setup mock manager mock_manager = MagicMock() mock_manager_class.return_value = mock_manager - + processor = _PerformanceCountersLogRecordProcessor() - + # Create mock log data mock_readable_log_record = MagicMock(spec=ReadableLogRecord) @@ -53,10 +53,10 @@ def test_on_emit_with_manager(self, mock_manager_class): def test_emit_calls_on_emit(self): """Test emit method calls on_emit.""" processor = _PerformanceCountersLogRecordProcessor() - + # Mock the on_emit method processor.on_emit = MagicMock() - + # Create mock log data mock_readable_log_record = MagicMock(spec=ReadableLogRecord) @@ -68,14 +68,14 @@ def test_emit_calls_on_emit(self): def test_shutdown(self): """Test shutdown method.""" processor = _PerformanceCountersLogRecordProcessor() - + # Should not raise exception processor.shutdown() def test_force_flush(self): """Test force_flush method.""" processor = _PerformanceCountersLogRecordProcessor() - + # Should not raise exception processor.force_flush() processor.force_flush(timeout_millis=5000) @@ -87,12 +87,12 @@ def test_exception_propagation_in_on_emit(self, mock_manager_class): mock_manager = MagicMock() mock_manager._record_log_record.side_effect = Exception("Test error") mock_manager_class.return_value = mock_manager - + processor = _PerformanceCountersLogRecordProcessor() - + # Create mock log data mock_readable_log_record = MagicMock(spec=ReadableLogRecord) - + # Exception should be propagated with self.assertRaises(Exception) as context: processor.on_emit(mock_readable_log_record) @@ -119,14 +119,14 @@ def test_on_end_with_manager(self, mock_manager_class): # Setup mock manager mock_manager = MagicMock() mock_manager_class.return_value = mock_manager - + processor = 
_PerformanceCountersSpanProcessor() - + # Create mock span mock_span = MagicMock(spec=ReadableSpan) - + processor.on_end(mock_span) - + # Verify manager was called mock_manager_class.assert_called_once() mock_manager._record_span.assert_called_once_with(mock_span) @@ -138,27 +138,27 @@ def test_exception_propagation_in_on_end(self, mock_manager_class): mock_manager = MagicMock() mock_manager._record_span.side_effect = Exception("Test error") mock_manager_class.return_value = mock_manager - + processor = _PerformanceCountersSpanProcessor() - + # Create mock span mock_span = MagicMock(spec=ReadableSpan) - + # Exception should be propagated with self.assertRaises(Exception) as context: processor.on_end(mock_span) - + self.assertEqual(str(context.exception), "Test error") def test_on_end_calls_super(self): """Test on_end calls super method.""" processor = _PerformanceCountersSpanProcessor() - + # Mock the super class method - with mock.patch.object(processor.__class__.__bases__[0], 'on_end') as mock_super_on_end: + with mock.patch.object(processor.__class__.__bases__[0], "on_end") as mock_super_on_end: mock_span = MagicMock(spec=ReadableSpan) - + processor.on_end(mock_span) - + # Verify super was called mock_super_on_end.assert_called_once_with(mock_span) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_cpu.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_cpu.py index d877dab05e45..8dbb5eafdb77 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_cpu.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_cpu.py @@ -51,4 +51,5 @@ def test_process_time(self, process_mock, process_time_mock, elapsed_time_mock): num_cpus = psutil.cpu_count() self.assertAlmostEqual(obs.value, 1.2 / num_cpus, delta=1) + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_filter.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_filter.py index bf20493dbbd9..fe45d6caec5d 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_filter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_filter.py @@ -21,17 +21,17 @@ class TestFilter(unittest.TestCase): - @mock.patch('azure.monitor.opentelemetry.exporter._quickpulse._filter._parse_document_filter_configuration') - @mock.patch('azure.monitor.opentelemetry.exporter._quickpulse._filter._clear_quickpulse_projection_map') - @mock.patch('azure.monitor.opentelemetry.exporter._quickpulse._filter._parse_metric_filter_configuration') - @mock.patch('azure.monitor.opentelemetry.exporter._quickpulse._filter._set_quickpulse_etag') + @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._parse_document_filter_configuration") + @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._clear_quickpulse_projection_map") + @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._parse_metric_filter_configuration") + @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._set_quickpulse_etag") def test_update_filter_configuration(self, mock_set_etag, mock_parse_metric, mock_clear_projection, mock_parse_doc): etag = "new_etag" config = {"key": "value"} config_bytes = json.dumps(config).encode("utf-8") - + _update_filter_configuration(etag, config_bytes) - + mock_clear_projection.assert_called_once() mock_parse_metric.assert_called_once_with(config) mock_parse_doc.assert_called_once_with(config) @@ -43,7 +43,7 @@ def test_update_filter_configuration(self, mock_set_etag, mock_parse_metric, moc @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._validate_derived_metric_info") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter.DerivedMetricInfo") def test_parse_metric_filter_configuration( - self, dict_mock, validate_mock, rename_mock, 
init_projection_mock, set_metric_info_mock + self, dict_mock, validate_mock, rename_mock, init_projection_mock, set_metric_info_mock ): test_config_bytes = '{"Metrics":[{"Id":"94.e4b85108","TelemetryType":"Request","FilterGroups":[{"Filters":[]}],"Projection":"Count()","Aggregation":"Sum","BackEndAggregation":"Sum"}]}'.encode() test_config_dict = json.loads(test_config_bytes.decode()) @@ -68,7 +68,7 @@ def test_parse_metric_filter_configuration( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._validate_derived_metric_info") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter.DerivedMetricInfo") def test_parse_metric_filter_configuration_invalid( - self, dict_mock, validate_mock, rename_mock, init_projection_mock, set_metric_info_mock + self, dict_mock, validate_mock, rename_mock, init_projection_mock, set_metric_info_mock ): test_config_bytes = '{"Metrics":[{"Id":"94.e4b85108","TelemetryType":"Request","FilterGroups":[{"Filters":[]}],"Projection":"Count()","Aggregation":"Sum","BackEndAggregation":"Sum"}]}'.encode() test_config_dict = json.loads(test_config_bytes.decode()) @@ -88,9 +88,7 @@ def test_parse_metric_filter_configuration_invalid( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._rename_exception_fields_for_filtering") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._validate_document_filter_group_info") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter.DocumentStreamInfo") - def test_parse_doc_filter_configuration( - self, dict_mock, validate_mock, rename_mock, set_doc_info_mock - ): + def test_parse_doc_filter_configuration(self, dict_mock, validate_mock, rename_mock, set_doc_info_mock): test_config_bytes = '{"DocumentStreams": [ { "Id": "26a.7cf471b0", "DocumentFilterGroups": [ { "TelemetryType": "Request", "Filters": { "Filters": [ { "FieldName": "Success", "Predicate": "Equal", "Comparand": "true" }, { "FieldName": "Url", "Predicate": "Contains", 
"Comparand": "privacy" } ] } }, { "TelemetryType": "Dependency", "Filters": { "Filters": [ { "FieldName": "Success", "Predicate": "Equal", "Comparand": "true" } ] } }, { "TelemetryType": "Exception", "Filters": { "Filters": [] } }, { "TelemetryType": "Event", "Filters": { "Filters": [] } }, { "TelemetryType": "Trace", "Filters": { "Filters": [] } }, { "TelemetryType": "Request", "Filters": { "Filters": [ { "FieldName": "Duration", "Predicate": "LessThan", "Comparand": "0.0:0:0.015" } ] } } ] } ]}'.encode() test_config_dict = json.loads(test_config_bytes.decode()) doc_stream_mock = mock.Mock() @@ -105,9 +103,7 @@ def test_parse_doc_filter_configuration( validate_mock.assert_called_once_with(filter_group_mock) rename_mock.assert_called_once_with(filter_group_mock.filters) doc_infos = {} - doc_infos_inner = { - "26a.7cf471b0": [filter_group_mock.filters] - } + doc_infos_inner = {"26a.7cf471b0": [filter_group_mock.filters]} doc_infos[TelemetryType.REQUEST] = doc_infos_inner set_doc_info_mock.assert_called_once_with(doc_infos) @@ -115,9 +111,7 @@ def test_parse_doc_filter_configuration( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._rename_exception_fields_for_filtering") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter._validate_document_filter_group_info") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._filter.DocumentStreamInfo") - def test_parse_doc_filter_configuration_invalid( - self, dict_mock, validate_mock, rename_mock, set_doc_info_mock - ): + def test_parse_doc_filter_configuration_invalid(self, dict_mock, validate_mock, rename_mock, set_doc_info_mock): test_config_bytes = '{"DocumentStreams": [ { "Id": "26a.7cf471b0", "DocumentFilterGroups": [ { "TelemetryType": "Request", "Filters": { "Filters": [ { "FieldName": "Success", "Predicate": "Equal", "Comparand": "true" }, { "FieldName": "Url", "Predicate": "Contains", "Comparand": "privacy" } ] } }, { "TelemetryType": "Dependency", "Filters": { 
"Filters": [ { "FieldName": "Success", "Predicate": "Equal", "Comparand": "true" } ] } }, { "TelemetryType": "Exception", "Filters": { "Filters": [] } }, { "TelemetryType": "Event", "Filters": { "Filters": [] } }, { "TelemetryType": "Trace", "Filters": { "Filters": [] } }, { "TelemetryType": "Request", "Filters": { "Filters": [ { "FieldName": "Duration", "Predicate": "LessThan", "Comparand": "0.0:0:0.015" } ] } } ] } ]}'.encode() test_config_dict = json.loads(test_config_bytes.decode()) doc_stream_mock = mock.Mock() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_live_metrics.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_live_metrics.py index e7ba39449125..35d30f741992 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_live_metrics.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_live_metrics.py @@ -21,23 +21,21 @@ def test_enable_live_metrics_basic(self, manager_mock, statsbeat_mock): mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + enable_live_metrics( - connection_string="InstrumentationKey=test-key", - resource=mock_resource, - credential="test-credential" + connection_string="InstrumentationKey=test-key", resource=mock_resource, credential="test-credential" ) - + # Verify manager was obtained (likely returns singleton instance) manager_mock.assert_called_once() - + # Verify manager was initialized with correct kwargs mock_manager_instance.initialize.assert_called_once_with( connection_string="InstrumentationKey=test-key", resource=mock_resource, credential="test-credential", ) - + # Verify statsbeat feature was set statsbeat_mock.assert_called_once() @@ -48,17 +46,15 @@ def test_enable_live_metrics_initialization_fails(self, manager_mock, statsbeat_ mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = False 
manager_mock.return_value = mock_manager_instance - + enable_live_metrics(connection_string="InstrumentationKey=test-key") - + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify manager was initialized with connection string - mock_manager_instance.initialize.assert_called_once_with( - connection_string="InstrumentationKey=test-key" - ) - + mock_manager_instance.initialize.assert_called_once_with(connection_string="InstrumentationKey=test-key") + # Verify statsbeat feature was still set (regardless of initialization success) statsbeat_mock.assert_called_once() @@ -69,15 +65,15 @@ def test_enable_live_metrics_with_minimal_args(self, manager_mock, statsbeat_moc mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + enable_live_metrics() - + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify initialization was attempted with no kwargs mock_manager_instance.initialize.assert_called_once_with() - + # Verify statsbeat feature was set statsbeat_mock.assert_called_once() @@ -85,32 +81,30 @@ def test_enable_live_metrics_with_minimal_args(self, manager_mock, statsbeat_moc @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._live_metrics.get_quickpulse_manager") def test_enable_live_metrics_with_all_parameters(self, manager_mock, statsbeat_mock): """Test enable_live_metrics with all possible parameters.""" - mock_resource = Resource.create({ - "service.name": "test-service", - "service.version": "1.0.0", - "deployment.environment": "test" - }) + mock_resource = Resource.create( + {"service.name": "test-service", "service.version": "1.0.0", "deployment.environment": "test"} + ) mock_credential = mock.Mock() mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + enable_live_metrics( 
connection_string="InstrumentationKey=test-key;EndpointSuffix=applicationinsights.azure.cn", credential=mock_credential, resource=mock_resource, - custom_param="custom_value" # Test that additional kwargs are passed through + custom_param="custom_value", # Test that additional kwargs are passed through ) - + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify initialization was called with all parameters including custom ones mock_manager_instance.initialize.assert_called_once_with( connection_string="InstrumentationKey=test-key;EndpointSuffix=applicationinsights.azure.cn", credential=mock_credential, resource=mock_resource, - custom_param="custom_value" + custom_param="custom_value", ) @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._live_metrics.set_statsbeat_live_metrics_feature_set") @@ -120,22 +114,14 @@ def test_enable_live_metrics_with_none_values(self, manager_mock, statsbeat_mock mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - - enable_live_metrics( - connection_string=None, - resource=None, - credential=None - ) - + + enable_live_metrics(connection_string=None, resource=None, credential=None) + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify initialization was called with None values - mock_manager_instance.initialize.assert_called_once_with( - connection_string=None, - resource=None, - credential=None - ) + mock_manager_instance.initialize.assert_called_once_with(connection_string=None, resource=None, credential=None) @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._live_metrics.set_statsbeat_live_metrics_feature_set") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._live_metrics.get_quickpulse_manager") @@ -144,17 +130,15 @@ def test_enable_live_metrics_empty_string_connection(self, manager_mock, statsbe mock_manager_instance = mock.Mock() 
mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + enable_live_metrics(connection_string="") - + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify initialization was called with empty string - mock_manager_instance.initialize.assert_called_once_with( - connection_string="" - ) - + mock_manager_instance.initialize.assert_called_once_with(connection_string="") + # Verify statsbeat feature was set statsbeat_mock.assert_called_once() @@ -162,37 +146,35 @@ def test_enable_live_metrics_empty_string_connection(self, manager_mock, statsbe @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._live_metrics.get_quickpulse_manager") def test_enable_live_metrics_complex_resource(self, manager_mock, statsbeat_mock): """Test enable_live_metrics with complex resource attributes.""" - mock_resource = Resource.create({ - "service.name": "test-service", - "service.version": "1.2.3", - "service.namespace": "test-namespace", - "deployment.environment": "production", - "cloud.provider": "azure", - "cloud.platform": "azure_app_service", - "host.name": "test-host", - "process.pid": 12345, - "telemetry.sdk.name": "opentelemetry", - "telemetry.sdk.language": "python", - "telemetry.sdk.version": "1.0.0" - }) + mock_resource = Resource.create( + { + "service.name": "test-service", + "service.version": "1.2.3", + "service.namespace": "test-namespace", + "deployment.environment": "production", + "cloud.provider": "azure", + "cloud.platform": "azure_app_service", + "host.name": "test-host", + "process.pid": 12345, + "telemetry.sdk.name": "opentelemetry", + "telemetry.sdk.language": "python", + "telemetry.sdk.version": "1.0.0", + } + ) mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - - enable_live_metrics( - connection_string="InstrumentationKey=test-key", - resource=mock_resource - ) - + + 
enable_live_metrics(connection_string="InstrumentationKey=test-key", resource=mock_resource) + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify initialization was called with complex resource mock_manager_instance.initialize.assert_called_once_with( - connection_string="InstrumentationKey=test-key", - resource=mock_resource + connection_string="InstrumentationKey=test-key", resource=mock_resource ) - + # Verify statsbeat feature was set statsbeat_mock.assert_called_once() @@ -203,23 +185,23 @@ def test_enable_live_metrics_multiple_calls(self, manager_mock, statsbeat_mock): mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + # First call enable_live_metrics(connection_string="InstrumentationKey=test-key1") - + # Second call with different parameters enable_live_metrics(connection_string="InstrumentationKey=test-key2") - + # Verify manager was obtained twice (since it's likely a singleton, this tests the behavior) self.assertEqual(manager_mock.call_count, 2) - + # Verify initialization was called twice with different parameters expected_calls = [ mock.call(connection_string="InstrumentationKey=test-key1"), - mock.call(connection_string="InstrumentationKey=test-key2") + mock.call(connection_string="InstrumentationKey=test-key2"), ] mock_manager_instance.initialize.assert_has_calls(expected_calls) - + # Verify statsbeat feature was set twice self.assertEqual(statsbeat_mock.call_count, 2) @@ -230,26 +212,26 @@ def test_enable_live_metrics_kwargs_preservation(self, manager_mock, statsbeat_m mock_manager_instance = mock.Mock() mock_manager_instance.initialize.return_value = True manager_mock.return_value = mock_manager_instance - + custom_kwargs = { "connection_string": "InstrumentationKey=test-key", "custom_param1": "value1", "custom_param2": 42, "custom_param3": True, "custom_param4": ["list", "of", "values"], - "custom_param5": {"nested": "dict"} + 
"custom_param5": {"nested": "dict"}, } - + enable_live_metrics(**custom_kwargs) - + # Verify manager was obtained manager_mock.assert_called_once() - + # Verify all kwargs were passed through to initialize mock_manager_instance.initialize.assert_called_once_with(**custom_kwargs) - + # Verify statsbeat feature was set statsbeat_mock.assert_called_once() -# cSpell:enable +# cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py index 235c553c77d1..f59803cb6a1b 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_manager.py @@ -20,7 +20,7 @@ from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import SpanKind -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys from azure.monitor.opentelemetry.exporter._quickpulse._constants import ( _DEPENDENCY_DURATION_NAME, _DEPENDENCY_FAILURE_RATE_NAME, @@ -100,13 +100,13 @@ def test_init(self, generator_mock): generator_mock.return_value = "test_trace_id" part_a_fields = _populate_part_a_fields(self.resource) qpm = _QuickpulseManager() - + # Initialize with kwargs qpm.initialize( connection_string=self.connection_string, resource=self.resource, ) - + self.assertEqual(_get_global_quickpulse_state(), _QuickpulseState.PING_SHORT) self.assertTrue(isinstance(qpm._exporter, _QuickpulseExporter)) self.assertEqual( @@ -192,42 +192,39 @@ def test_singleton(self): def test_initialize_success(self): """Test successful initialization.""" qpm = _QuickpulseManager() - + # Initially not initialized self.assertFalse(qpm.is_initialized()) - + # Initialize should succeed - result = qpm.initialize( - connection_string=self.connection_string, - 
resource=self.resource - ) + result = qpm.initialize(connection_string=self.connection_string, resource=self.resource) self.assertTrue(result) self.assertTrue(qpm.is_initialized()) - + # Should have created all necessary components self.assertIsNotNone(qpm._exporter) self.assertIsNotNone(qpm._reader) self.assertIsNotNone(qpm._meter_provider) self.assertIsNotNone(qpm._meter) self.assertIsNotNone(qpm._base_monitoring_data_point) - + # Cleanup qpm.shutdown() def test_initialize_already_initialized(self): """Test initialization when already initialized.""" qpm = _QuickpulseManager() - + # First initialization result1 = qpm.initialize(connection_string=self.connection_string) self.assertTrue(result1) self.assertTrue(qpm.is_initialized()) - + # Second initialization should return True without reinitializing result2 = qpm.initialize(connection_string=self.connection_string) self.assertTrue(result2) self.assertTrue(qpm.is_initialized()) - + # Cleanup qpm.shutdown() @@ -235,14 +232,14 @@ def test_initialize_already_initialized(self): def test_initialize_failure(self, exporter_mock): """Test initialization failure handling.""" exporter_mock.side_effect = Exception("Exporter creation failed") - + qpm = _QuickpulseManager() - + # Initialize should fail result = qpm.initialize(connection_string=self.connection_string) self.assertFalse(result) self.assertFalse(qpm.is_initialized()) - + # Components should be cleaned up self.assertIsNone(qpm._exporter) self.assertIsNone(qpm._reader) @@ -252,28 +249,28 @@ def test_initialize_failure(self, exporter_mock): def test_initialize_with_default_resource(self): """Test initialization with default resource when none provided.""" qpm = _QuickpulseManager() - + result = qpm.initialize(connection_string=self.connection_string) self.assertTrue(result) self.assertTrue(qpm.is_initialized()) self.assertIsNotNone(qpm._base_monitoring_data_point) - + # Cleanup qpm.shutdown() def test_shutdown_success(self): """Test successful shutdown.""" qpm = 
_QuickpulseManager() - + # Initialize first qpm.initialize(connection_string=self.connection_string) self.assertTrue(qpm.is_initialized()) - + # Shutdown should succeed result = qpm.shutdown() self.assertTrue(result) self.assertFalse(qpm.is_initialized()) - + # Components should be cleaned up self.assertIsNone(qpm._exporter) self.assertIsNone(qpm._reader) @@ -283,7 +280,7 @@ def test_shutdown_success(self): def test_shutdown_not_initialized(self): """Test shutdown when not initialized.""" qpm = _QuickpulseManager() - + # Shutdown should return False when not initialized result = qpm.shutdown() self.assertFalse(result) @@ -293,13 +290,13 @@ def test_shutdown_not_initialized(self): def test_shutdown_meter_provider_exception(self, logger_mock): """Test shutdown handling meter provider exception.""" qpm = _QuickpulseManager() - + # Initialize first qpm.initialize(connection_string=self.connection_string) - + # Mock meter provider to raise exception on shutdown qpm._meter_provider.shutdown = mock.Mock(side_effect=Exception("Shutdown failed")) - + # Shutdown should handle exception and return False result = qpm.shutdown() self.assertFalse(result) @@ -331,18 +328,18 @@ def test_record_span_server_success(self, post_state_mock, data_mock, metric_der connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._request_rate_counter = mock.Mock() qpm._request_duration = mock.Mock() - + qpm._record_span(span_mock) qpm._request_rate_counter.add.assert_called_once_with(1) qpm._request_duration.record.assert_called_once_with(5e-09) data_mock._from_span.assert_called_once_with(span_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data) - + # Cleanup qpm.shutdown() @@ -372,18 +369,18 @@ def test_record_span_server_failure(self, post_state_mock, data_mock, metric_der connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._request_failed_rate_counter 
= mock.Mock() qpm._request_duration = mock.Mock() - + qpm._record_span(span_mock) qpm._request_failed_rate_counter.add.assert_called_once_with(1) qpm._request_duration.record.assert_called_once_with(5e-09) data_mock._from_span.assert_called_once_with(span_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data) - + # Cleanup qpm.shutdown() @@ -415,18 +412,18 @@ def test_record_span_dep_success(self, post_state_mock, data_mock, metric_derive connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._dependency_rate_counter = mock.Mock() qpm._dependency_duration = mock.Mock() - + qpm._record_span(span_mock) qpm._dependency_rate_counter.add.assert_called_once_with(1) qpm._dependency_duration.record.assert_called_once_with(5e-09) data_mock._from_span.assert_called_once_with(span_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data) - + # Cleanup qpm.shutdown() @@ -458,18 +455,18 @@ def test_record_span_dep_failure(self, post_state_mock, data_mock, metric_derive connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._dependency_failure_rate_counter = mock.Mock() qpm._dependency_duration = mock.Mock() - + qpm._record_span(span_mock) qpm._dependency_failure_rate_counter.add.assert_called_once_with(1) qpm._dependency_duration.record.assert_called_once_with(5e-09) data_mock._from_span.assert_called_once_with(span_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data) - + # Cleanup qpm.shutdown() @@ -477,9 +474,7 @@ def test_record_span_dep_failure(self, post_state_mock, data_mock, metric_derive @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._derive_metrics_from_telemetry_data") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") 
@mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") - def test_record_span_derive_filter_metrics( - self, post_state_mock, data_mock, metric_derive_mock, doc_mock - ): + def test_record_span_derive_filter_metrics(self, post_state_mock, data_mock, metric_derive_mock, doc_mock): post_state_mock.return_value = True span_mock = mock.Mock() span_mock.end_time = 10 @@ -503,7 +498,7 @@ def test_record_span_derive_filter_metrics( data_mock._from_span.assert_called_once_with(span_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data) - + # Cleanup qpm.shutdown() @@ -513,7 +508,12 @@ def test_record_span_derive_filter_metrics( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") def test_record_span_span_event_exception( - self, post_state_mock, data_mock, metric_derive_mock, doc_mock, exc_data_mock, + self, + post_state_mock, + data_mock, + metric_derive_mock, + doc_mock, + exc_data_mock, ): post_state_mock.return_value = True span_mock = mock.Mock() @@ -542,10 +542,10 @@ def test_record_span_span_event_exception( connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._exception_rate_counter = mock.Mock() - + qpm._record_span(span_mock) qpm._exception_rate_counter.add.assert_called_once_with(1) data_mock._from_span.assert_called_once_with(span_mock) @@ -554,7 +554,7 @@ def test_record_span_span_event_exception( doc_mock.assert_any_call(data) metric_derive_mock.assert_any_call(exc_data) doc_mock.assert_any_call(exc_data) - + # Cleanup qpm.shutdown() @@ -578,16 +578,16 @@ def test_record_log(self, post_state_mock, data_mock, metric_derive_mock, doc_mo connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._exception_rate_counter = mock.Mock() - + 
qpm._record_log_record(log_data_mock) qpm._exception_rate_counter.add.assert_not_called() data_mock._from_log_record.assert_called_once_with(log_record_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data, None) - + # Cleanup qpm.shutdown() @@ -616,16 +616,16 @@ def test_record_log_exception(self, post_state_mock, data_mock, metric_derive_mo connection_string=self.connection_string, resource=self.resource, ) - + # Mock the metric instruments qpm._exception_rate_counter = mock.Mock() - + qpm._record_log_record(log_data_mock) qpm._exception_rate_counter.add.assert_called_once_with(1) data_mock._from_log_record.assert_called_once_with(log_record_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data, "exc_type") - + # Cleanup qpm.shutdown() @@ -633,9 +633,7 @@ def test_record_log_exception(self, post_state_mock, data_mock, metric_derive_mo @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._derive_metrics_from_telemetry_data") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") - def test_record_log_derive_filter_metrics( - self, post_state_mock, data_mock, metric_derive_mock, doc_mock - ): + def test_record_log_derive_filter_metrics(self, post_state_mock, data_mock, metric_derive_mock, doc_mock): post_state_mock.return_value = True log_record_mock = mock.Mock() log_record_mock.attributes = {} @@ -655,16 +653,14 @@ def test_record_log_derive_filter_metrics( data_mock._from_log_record.assert_called_once_with(log_record_mock) metric_derive_mock.assert_called_once_with(data) doc_mock.assert_called_once_with(data, None) - + # Cleanup qpm.shutdown() @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._create_projections") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._check_metric_filters") 
@mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._get_quickpulse_derived_metric_infos") - def test_derive_metrics_from_telemetry_data( - self, get_derived_mock, filter_mock, projection_mock - ): + def test_derive_metrics_from_telemetry_data(self, get_derived_mock, filter_mock, projection_mock): metric_infos = [mock.Mock()] get_derived_mock.return_value = { TelemetryType.DEPENDENCY: metric_infos, @@ -714,12 +710,14 @@ def test_derive_metrics_from_telemetry_data_filter_false(self, get_derived_mock, @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._check_filters") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._get_quickpulse_doc_stream_infos") def test_apply_doc_filters_from_telemetry_data( - self, get_doc_stream_mock, filter_mock, get_span_mock, append_mock, + self, + get_doc_stream_mock, + filter_mock, + get_span_mock, + append_mock, ): filter_group_mock = mock.Mock() - doc_infos_inner = { - "streamId": [filter_group_mock] - } + doc_infos_inner = {"streamId": [filter_group_mock]} get_doc_stream_mock.return_value = { TelemetryType.DEPENDENCY: doc_infos_inner, } @@ -748,7 +746,11 @@ def test_apply_doc_filters_from_telemetry_data( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._check_filters") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._get_quickpulse_doc_stream_infos") def test_apply_doc_filters_from_telemetry_data_all_streams( - self, get_doc_stream_mock, filter_mock, get_span_mock, append_mock, + self, + get_doc_stream_mock, + filter_mock, + get_span_mock, + append_mock, ): get_doc_stream_mock.return_value = { TelemetryType.DEPENDENCY: {}, @@ -779,12 +781,14 @@ def test_apply_doc_filters_from_telemetry_data_all_streams( @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._check_filters") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._get_quickpulse_doc_stream_infos") def 
test_apply_doc_filters_from_telemetry_data_false_filter( - self, get_doc_stream_mock, filter_mock, get_span_mock, append_mock, + self, + get_doc_stream_mock, + filter_mock, + get_span_mock, + append_mock, ): filter_group_mock = mock.Mock() - doc_infos_inner = { - "streamId": [filter_group_mock] - } + doc_infos_inner = {"streamId": [filter_group_mock]} get_doc_stream_mock.return_value = { TelemetryType.DEPENDENCY: doc_infos_inner, } @@ -812,14 +816,14 @@ def test_apply_doc_filters_from_telemetry_data_false_filter( def test_validate_recording_resources(self): """Test _validate_recording_resources method.""" qpm = _QuickpulseManager() - + # Before initialization - should return False self.assertFalse(qpm._validate_recording_resources()) - + # After initialization - should return True qpm.initialize(connection_string=self.connection_string, resource=self.resource) self.assertTrue(qpm._validate_recording_resources()) - + # After shutdown - instruments should be preserved qpm.shutdown() self.assertTrue(qpm._validate_recording_resources()) @@ -830,19 +834,19 @@ def test_record_span_not_post_state(self, post_state_mock): post_state_mock.return_value = False qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) - + span_mock = mock.Mock() - + # Mock the metric instruments to verify they're not called qpm._request_rate_counter = mock.Mock() qpm._request_duration = mock.Mock() - + qpm._record_span(span_mock) - + # Verify no metrics were recorded qpm._request_rate_counter.add.assert_not_called() qpm._request_duration.record.assert_not_called() - + # Cleanup qpm.shutdown() @@ -851,12 +855,12 @@ def test_record_span_not_initialized(self, post_state_mock): """Test _record_span when manager is not initialized.""" post_state_mock.return_value = True qpm = _QuickpulseManager() - + span_mock = mock.Mock() - + # Should not crash and should not record anything qpm._record_span(span_mock) - + # Verify manager is still not initialized 
self.assertFalse(qpm.is_initialized()) @@ -865,7 +869,9 @@ def test_record_span_not_initialized(self, post_state_mock): @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._derive_metrics_from_telemetry_data") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") - def test_record_span_exception_handling(self, post_state_mock, data_mock, metric_derive_mock, doc_mock, logger_mock): + def test_record_span_exception_handling( + self, post_state_mock, data_mock, metric_derive_mock, doc_mock, logger_mock + ): """Test _record_span exception handling.""" post_state_mock.return_value = True span_mock = mock.Mock() @@ -874,19 +880,19 @@ def test_record_span_exception_handling(self, post_state_mock, data_mock, metric span_mock.status.is_ok = True span_mock.kind = SpanKind.SERVER span_mock.events = [] - + # Make _TelemetryData._from_span raise an exception data_mock._from_span.side_effect = Exception("Test exception") - + qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) - + # Should not crash when exception occurs qpm._record_span(span_mock) - + # Verify exception was logged logger_mock.exception.assert_called_once() - + # Cleanup qpm.shutdown() @@ -896,17 +902,17 @@ def test_record_log_record_not_post_state(self, post_state_mock): post_state_mock.return_value = False qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) - + log_data_mock = mock.Mock() - + # Mock the metric instruments to verify they're not called qpm._exception_rate_counter = mock.Mock() - + qpm._record_log_record(log_data_mock) - + # Verify no metrics were recorded qpm._exception_rate_counter.add.assert_not_called() - + # Cleanup qpm.shutdown() @@ -915,12 +921,12 @@ def test_record_log_record_not_initialized(self, post_state_mock): """Test _record_log_record 
when manager is not initialized.""" post_state_mock.return_value = True qpm = _QuickpulseManager() - + log_data_mock = mock.Mock() - + # Should not crash and should not record anything qpm._record_log_record(log_data_mock) - + # Verify manager is still not initialized self.assertFalse(qpm.is_initialized()) @@ -929,25 +935,27 @@ def test_record_log_record_not_initialized(self, post_state_mock): @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._derive_metrics_from_telemetry_data") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._TelemetryData") @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._is_post_state") - def test_record_log_record_exception_handling(self, post_state_mock, data_mock, metric_derive_mock, doc_mock, logger_mock): + def test_record_log_record_exception_handling( + self, post_state_mock, data_mock, metric_derive_mock, doc_mock, logger_mock + ): """Test _record_log_record exception handling.""" post_state_mock.return_value = True log_record_mock = mock.Mock() log_data_mock = mock.Mock() log_data_mock.log_record = log_record_mock - + # Make _TelemetryData._from_log_record raise an exception data_mock._from_log_record.side_effect = Exception("Test exception") - + qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) - + # Should not crash when exception occurs qpm._record_log_record(log_data_mock) - + # Verify exception was logged logger_mock.exception.assert_called_once() - + # Cleanup qpm.shutdown() @@ -963,33 +971,33 @@ def test_record_log_record_no_log_record(self, post_state_mock, data_mock, metri qpm = _QuickpulseManager() qpm.initialize(connection_string=self.connection_string, resource=self.resource) - + # Should not crash and should not process anything qpm._record_log_record(log_data_mock) - + # Verify no telemetry data processing occurred data_mock._from_log_record.assert_not_called() metric_derive_mock.assert_not_called() 
doc_mock.assert_not_called() - + # Cleanup qpm.shutdown() def test_create_metric_instruments_no_meter(self): """Test _create_metric_instruments when meter is None.""" qpm = _QuickpulseManager() - + # Should raise ValueError when meter is not set with self.assertRaises(ValueError) as context: qpm._create_metric_instruments() - + self.assertIn("Meter must be initialized", str(context.exception)) @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._manager._get_quickpulse_derived_metric_infos") def test_derive_metrics_no_config(self, get_derived_mock): """Test _derive_metrics_from_telemetry_data when no filtering is configured.""" get_derived_mock.return_value = {} - + data = _DependencyData( duration=0, success=True, @@ -1000,7 +1008,7 @@ def test_derive_metrics_no_config(self, get_derived_mock): data="", custom_dimensions={}, ) - + # Should return early without processing _derive_metrics_from_telemetry_data(data) get_derived_mock.assert_called_once() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py index cc6a890c403e..89778cee063b 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_processor.py @@ -26,7 +26,7 @@ def tearDownClass(cls) -> None: def test_emit(self, mock_get_manager): mock_manager = mock.Mock() mock_get_manager.return_value = mock_manager - + processor = _QuickpulseLogRecordProcessor() readable_log_record = mock.Mock() processor.on_emit(readable_log_record) @@ -56,4 +56,3 @@ def test_on_end(self, mock_get_manager): span = mock.Mock() processor.on_end(span) mock_manager._record_span.assert_called_once_with(span) - diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_projection.py 
b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_projection.py index e81d3c9b7078..59198e438bca 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_projection.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_projection.py @@ -18,6 +18,7 @@ _RequestData, ) + class TestProjection(unittest.TestCase): @mock.patch("azure.monitor.opentelemetry.exporter._quickpulse._projection._set_quickpulse_projection_map") @@ -105,4 +106,4 @@ def test_calculate_aggregation_avg(self, projection_map_mock): def test_calculate_aggregation_none(self, projection_map_mock): projection_map_mock.return_value = {"test-id": (AggregationType.AVG, 3.0, 3)} agg_tuple = _calculate_aggregation(AggregationType.AVG, "test-id2", 5.0) - self.assertIsNone(agg_tuple) \ No newline at end of file + self.assertIsNone(agg_tuple) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py index 9979f69f6be6..ae6ba4644ff5 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/quickpulse/test_types.py @@ -24,7 +24,6 @@ ) - class TestTelemetryData(unittest.TestCase): @patch("azure.monitor.opentelemetry.exporter._quickpulse._types._RequestData") @@ -62,7 +61,7 @@ def test_from_log_record_with_exception(self, fn_mock): log_record = Mock() log_record.attributes = { SpanAttributes.EXCEPTION_TYPE: "SomeException", - SpanAttributes.EXCEPTION_MESSAGE: "An error occurred" + SpanAttributes.EXCEPTION_MESSAGE: "An error occurred", } data_mock = Mock() fn_mock._from_log_record.return_value = data_mock @@ -90,10 +89,7 @@ def test_from_span_with_valid_data(self, utils_mock): span.start_time = 1000000000 span.name = "test_span" span.status.is_ok = True - span.attributes = { - SpanAttributes.HTTP_STATUS_CODE: 200, - "custom_attribute": 
"value" - } + span.attributes = {SpanAttributes.HTTP_STATUS_CODE: 200, "custom_attribute": "value"} utils_mock.return_value = "http://example.com" result = _RequestData._from_span(span) @@ -112,10 +108,7 @@ def test_from_span_with_error_status_code(self, utils_mock): span.start_time = 1000000000 span.name = "test_span" span.status.is_ok = True - span.attributes = { - SpanAttributes.HTTP_STATUS_CODE: 404, - "custom_attribute": "value" - } + span.attributes = {SpanAttributes.HTTP_STATUS_CODE: 404, "custom_attribute": "value"} utils_mock.return_value = "http://example.com" result = _RequestData._from_span(span) @@ -134,10 +127,7 @@ def test_from_span_http_stable_semconv(self, utils_mock): span.start_time = 1000000000 span.name = "test_span" span.status.is_ok = True - span.attributes = { - HTTP_RESPONSE_STATUS_CODE: 200, - "custom_attribute": "value" - } + span.attributes = {HTTP_RESPONSE_STATUS_CODE: 200, "custom_attribute": "value"} utils_mock.return_value = "http://example.com" result = _RequestData._from_span(span) @@ -174,21 +164,15 @@ def test_inproc_dependency(self): def test_http_dependency(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - SpanAttributes.HTTP_METHOD: "GET", - SpanAttributes.HTTP_URL: "http://example.com" - } + self.span.attributes = {SpanAttributes.HTTP_METHOD: "GET", SpanAttributes.HTTP_URL: "http://example.com"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "HTTP") self.assertEqual(result.data, "http://example.com") self.assertEqual(result.target, "example.com") - + def test_http_dependency_stable_semconv(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - HTTP_REQUEST_METHOD: "GET", - SpanAttributes.HTTP_URL: "http://example.com" - } + self.span.attributes = {HTTP_REQUEST_METHOD: "GET", SpanAttributes.HTTP_URL: "http://example.com"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "HTTP") self.assertEqual(result.data, "http://example.com") @@ 
-196,10 +180,7 @@ def test_http_dependency_stable_semconv(self): def test_db_dependency(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - SpanAttributes.DB_SYSTEM: "mysql", - SpanAttributes.DB_STATEMENT: "SELECT * FROM table" - } + self.span.attributes = {SpanAttributes.DB_SYSTEM: "mysql", SpanAttributes.DB_STATEMENT: "SELECT * FROM table"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "mysql") self.assertEqual(result.data, "SELECT * FROM table") @@ -207,51 +188,44 @@ def test_db_dependency(self): def test_messaging_dependency(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - SpanAttributes.MESSAGING_SYSTEM: "kafka" - } + self.span.attributes = {SpanAttributes.MESSAGING_SYSTEM: "kafka"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "kafka") self.assertEqual(result.target, "kafka") def test_rpc_dependency(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - SpanAttributes.RPC_SYSTEM: "grpc" - } + self.span.attributes = {SpanAttributes.RPC_SYSTEM: "grpc"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "grpc") self.assertEqual(result.target, "grpc") def test_genai_dependency(self): self.span.kind = SpanKind.CLIENT - self.span.attributes = { - gen_ai_attributes.GEN_AI_SYSTEM: "genai" - } + self.span.attributes = {gen_ai_attributes.GEN_AI_SYSTEM: "genai"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "genai") def test_producer_dependency(self): self.span.kind = SpanKind.PRODUCER - self.span.attributes = { - SpanAttributes.MESSAGING_SYSTEM: "kafka" - } + self.span.attributes = {SpanAttributes.MESSAGING_SYSTEM: "kafka"} result = _DependencyData._from_span(self.span) self.assertEqual(result.type, "Queue Message | kafka") + class TestExceptionData(unittest.TestCase): def setUp(self): self.log_record = Mock(spec=LogRecord) self.log_record.attributes = { SpanAttributes.EXCEPTION_MESSAGE: "Test 
exception message", - SpanAttributes.EXCEPTION_STACKTRACE: "Test stack trace" + SpanAttributes.EXCEPTION_STACKTRACE: "Test stack trace", } self.span_event = Mock(spec=LogRecord) self.span_event.attributes = { SpanAttributes.EXCEPTION_MESSAGE: "Test span event message", - SpanAttributes.EXCEPTION_STACKTRACE: "Test span event stack trace" + SpanAttributes.EXCEPTION_STACKTRACE: "Test span event stack trace", } def test_from_log_record(self): @@ -280,4 +254,5 @@ def test_from_span_event_empty_attributes(self): self.assertEqual(result.stack_trace, "") self.assertEqual(result.custom_dimensions, self.span_event.attributes) + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_manager.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_manager.py index 53e2e8d32400..df0b44223aee 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_manager.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_manager.py @@ -21,7 +21,7 @@ class TestStatsbeatConfig(unittest.TestCase): - + def setUp(self): os.environ.pop(_APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL, None) @@ -30,9 +30,9 @@ def test_init_basic(self): config = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", - instrumentation_key="test-key" + instrumentation_key="test-key", ) - + self.assertEqual(config.endpoint, "https://westus-1.in.applicationinsights.azure.com/") self.assertEqual(config.region, "westus") self.assertEqual(config.instrumentation_key, "test-key") @@ -50,30 +50,35 @@ def test_init_with_all_parameters(self): disable_offline_storage=True, credential=mock.Mock(), distro_version="1.0.0", - connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/" + 
connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/", ) - + self.assertEqual(config.endpoint, "https://westus-1.in.applicationinsights.azure.com/") self.assertEqual(config.region, "westus") self.assertEqual(config.instrumentation_key, "test-key") self.assertTrue(config.disable_offline_storage) self.assertIsNotNone(config.credential) self.assertEqual(config.distro_version, "1.0.0") - self.assertEqual(config.connection_string, "InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/") + self.assertEqual( + config.connection_string, + "InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/", + ) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager._get_stats_connection_string') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager._get_stats_connection_string") def test_init_invalid_connection_string_fallback(self, mock_get_stats_cs): """Test that invalid connection string falls back to default.""" mock_get_stats_cs.return_value = "InstrumentationKey=fallback;IngestionEndpoint=https://fallback.com/" - + config = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="invalid-connection-string" + connection_string="invalid-connection-string", + ) + + self.assertEqual( + config.connection_string, "InstrumentationKey=fallback;IngestionEndpoint=https://fallback.com/" ) - - self.assertEqual(config.connection_string, "InstrumentationKey=fallback;IngestionEndpoint=https://fallback.com/") mock_get_stats_cs.assert_called_once() def test_from_exporter_valid(self): @@ -85,9 +90,9 @@ def test_from_exporter_valid(self): exporter._disable_offline_storage = True exporter._credential = mock.Mock() exporter._distro_version = "1.0.0" - + 
config = StatsbeatConfig.from_exporter(exporter) - + self.assertIsNotNone(config) if config: self.assertEqual(config.endpoint, "https://westus-1.in.applicationinsights.azure.com/") @@ -103,7 +108,7 @@ def test_from_exporter_missing_instrumentation_key(self): exporter._endpoint = "https://westus-1.in.applicationinsights.azure.com/" exporter._region = "westus" exporter._instrumentation_key = None - + config = StatsbeatConfig.from_exporter(exporter) self.assertIsNone(config) @@ -113,7 +118,7 @@ def test_from_exporter_missing_endpoint(self): exporter._endpoint = None exporter._region = "westus" exporter._instrumentation_key = "test-key" - + config = StatsbeatConfig.from_exporter(exporter) self.assertIsNone(config) @@ -123,30 +128,28 @@ def test_from_exporter_missing_region(self): exporter._endpoint = "https://westus-1.in.applicationinsights.azure.com/" exporter._region = None exporter._instrumentation_key = "test-key" - + config = StatsbeatConfig.from_exporter(exporter) self.assertIsNone(config) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager._get_connection_string_for_region_from_config') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager._get_connection_string_for_region_from_config") def test_from_config_valid(self, mock_get_cs_for_region): """Test creating config from base config and dictionary.""" mock_get_cs_for_region.return_value = "InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/" - + base_config = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", credential="test_credential", disable_offline_storage=False, - distro_version="1.0.0" + distro_version="1.0.0", ) - - config_dict = { - 'disable_offline_storage': 'true' - } - + + config_dict = {"disable_offline_storage": "true"} + new_config = StatsbeatConfig.from_config(base_config, config_dict) - + 
self.assertIsNotNone(new_config) if new_config: self.assertEqual(new_config.endpoint, base_config.endpoint) @@ -155,24 +158,27 @@ def test_from_config_valid(self, mock_get_cs_for_region): self.assertTrue(new_config.disable_offline_storage) self.assertEqual(new_config.credential, "test_credential") self.assertEqual(new_config.distro_version, "1.0.0") - self.assertEqual(new_config.connection_string, "InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/") + self.assertEqual( + new_config.connection_string, + "InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/", + ) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager._get_connection_string_for_region_from_config') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager._get_connection_string_for_region_from_config") def test_from_config_fallback_connection_string(self, mock_get_cs_for_region): """Test that from_config falls back to base config connection string when needed.""" mock_get_cs_for_region.return_value = None - + base_config = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="InstrumentationKey=base;IngestionEndpoint=https://base.com/" + connection_string="InstrumentationKey=base;IngestionEndpoint=https://base.com/", ) - + config_dict = {} - + new_config = StatsbeatConfig.from_config(base_config, config_dict) - + self.assertIsNotNone(new_config) if new_config: self.assertEqual(new_config.connection_string, base_config.connection_string) @@ -180,11 +186,9 @@ def test_from_config_fallback_connection_string(self, mock_get_cs_for_region): def test_from_config_missing_instrumentation_key(self): """Test from_config with missing instrumentation key.""" base_config = StatsbeatConfig( - endpoint="https://westus-1.in.applicationinsights.azure.com/", - 
region="westus", - instrumentation_key="" + endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="" ) - + result = StatsbeatConfig.from_config(base_config, {}) self.assertIsNone(result) @@ -194,24 +198,24 @@ def test_equality(self): endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + config2 = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + config3 = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", disable_offline_storage=True, - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + self.assertEqual(config1, config2) self.assertNotEqual(config1, config3) self.assertNotEqual(config1, "not a config") @@ -222,31 +226,31 @@ def test_hash(self): endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + config2 = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + config3 = StatsbeatConfig( 
endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="test-key", disable_offline_storage=True, - connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/" + connection_string="InstrumentationKey=test;IngestionEndpoint=https://test.com/", ) - + self.assertEqual(hash(config1), hash(config2)) self.assertNotEqual(hash(config1), hash(config3)) # pylint: disable=protected-access class TestStatsbeatManager(unittest.TestCase): - + def setUp(self): """Set up test fixtures.""" os.environ[_APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL] = "false" @@ -258,22 +262,22 @@ def setUp(self): _STATSBEAT_STATE["SHUTDOWN"] = False _STATSBEAT_STATE["INITIAL_SUCCESS"] = False _STATSBEAT_STATE["INITIAL_FAILURE_COUNT"] = 0 - + # Create a fresh manager instance self.manager = StatsbeatManager() - if hasattr(self.manager, '_initialized'): + if hasattr(self.manager, "_initialized"): self.manager._initialized = False - if hasattr(self.manager, '_config'): + if hasattr(self.manager, "_config"): self.manager._config = None - if hasattr(self.manager, '_metrics'): + if hasattr(self.manager, "_metrics"): self.manager._metrics = None - if hasattr(self.manager, '_meter_provider'): + if hasattr(self.manager, "_meter_provider"): self.manager._meter_provider = None def tearDown(self): """Clean up after tests.""" try: - if hasattr(self, 'manager'): + if hasattr(self, "manager"): self.manager.shutdown() except Exception: pass @@ -283,10 +287,10 @@ def tearDown(self): del StatsbeatManager._instances[StatsbeatManager] def _create_valid_config( - self, - connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/", - disable_offline_storage=False - ): + self, + connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://westus-0.in.applicationinsights.azure.com/", + disable_offline_storage=False, + ): """Helper to create 
a valid StatsbeatConfig.""" return StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", @@ -314,27 +318,19 @@ def test_validate_config_none(self): def test_validate_config_missing_instrumentation_key(self): """Test _validate_config with missing instrumentation key.""" config = StatsbeatConfig( - endpoint="https://westus-1.in.applicationinsights.azure.com/", - region="westus", - instrumentation_key="" + endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", instrumentation_key="" ) self.assertFalse(StatsbeatManager._validate_config(config)) def test_validate_config_missing_endpoint(self): """Test _validate_config with missing endpoint.""" - config = StatsbeatConfig( - endpoint="", - region="westus", - instrumentation_key="test-key" - ) + config = StatsbeatConfig(endpoint="", region="westus", instrumentation_key="test-key") self.assertFalse(StatsbeatManager._validate_config(config)) def test_validate_config_missing_region(self): """Test _validate_config with missing region.""" config = StatsbeatConfig( - endpoint="https://westus-1.in.applicationinsights.azure.com/", - region="", - instrumentation_key="test-key" + endpoint="https://westus-1.in.applicationinsights.azure.com/", region="", instrumentation_key="test-key" ) self.assertFalse(StatsbeatManager._validate_config(config)) @@ -343,68 +339,69 @@ def test_validate_config_missing_connection_string(self): config = StatsbeatConfig( endpoint="https://westus-1.in.applicationinsights.azure.com/", region="westus", - instrumentation_key="test-key" + instrumentation_key="test-key", ) config.connection_string = "" self.assertFalse(StatsbeatManager._validate_config(config)) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled") def test_initialize_statsbeat_disabled(self, mock_is_enabled): """Test initialize when statsbeat is disabled.""" 
mock_is_enabled.return_value = False config = self._create_valid_config() - + result = self.manager.initialize(config) - + self.assertFalse(result) self.assertFalse(self.manager._initialized) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled") def test_initialize_invalid_config(self, mock_is_enabled): """Test initialize with invalid configuration.""" mock_is_enabled.return_value = True - + result = self.manager.initialize(None) # type: ignore - + self.assertFalse(result) self.assertFalse(self.manager._initialized) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader') - @patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager._StatsbeatMetrics') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled') - def test_initialize_success(self, mock_is_enabled, mock_statsbeat_metrics, - mock_exporter_class, mock_reader_class, mock_meter_provider_class): + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") + @patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager._StatsbeatMetrics") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled") + def test_initialize_success( + self, mock_is_enabled, mock_statsbeat_metrics, mock_exporter_class, mock_reader_class, mock_meter_provider_class + ): """Test successful initialization.""" mock_is_enabled.return_value = True - + # Mock the exporter mock_exporter = Mock() 
mock_exporter_class.return_value = mock_exporter - + # Mock the reader mock_reader = Mock() mock_reader_class.return_value = mock_reader - + # Mock the meter provider mock_meter_provider = Mock() mock_meter_provider_class.return_value = mock_meter_provider - + # Mock the statsbeat metrics mock_metrics = Mock() mock_statsbeat_metrics.return_value = mock_metrics - + config = self._create_valid_config() - + result = self.manager.initialize(config) - + self.assertTrue(result) self.assertTrue(self.manager._initialized) self.assertEqual(self.manager._config, config) self.assertEqual(self.manager._meter_provider, mock_meter_provider) self.assertEqual(self.manager._metrics, mock_metrics) - + # Verify mocks were called correctly mock_exporter_class.assert_called_once() mock_reader_class.assert_called_once() @@ -413,66 +410,67 @@ def test_initialize_success(self, mock_is_enabled, mock_statsbeat_metrics, mock_meter_provider.force_flush.assert_called_once() mock_metrics.init_non_initial_metrics.assert_called_once() - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader') - @patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled') - def test_initialize_failure_exception(self, mock_is_enabled, mock_exporter_class, - mock_reader_class, mock_meter_provider_class): + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") + @patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled") + def test_initialize_failure_exception( + self, mock_is_enabled, mock_exporter_class, mock_reader_class, 
mock_meter_provider_class + ): """Test initialization failure due to exception.""" mock_is_enabled.return_value = True mock_exporter_class.side_effect = ValueError("Test error") - + config = self._create_valid_config() - + result = self.manager.initialize(config) - + self.assertFalse(result) self.assertFalse(self.manager._initialized) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._state.is_statsbeat_enabled") def test_initialize_already_initialized_same_config(self, mock_is_enabled): """Test initialize when already initialized with same config.""" mock_is_enabled.return_value = True config = self._create_valid_config() - + # Mock initialized state self.manager._initialized = True self.manager._config = config - + result = self.manager.initialize(config) - + self.assertTrue(result) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled") def test_initialize_already_initialized_different_config_cs(self, mock_is_enabled): """Test initialize when already initialized with different config.""" mock_is_enabled.return_value = True - + old_config = self._create_valid_config() new_config = StatsbeatConfig( endpoint="https://eastus-1.in.applicationinsights.azure.com/", region="eastus", instrumentation_key="different-key", - connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://eastus-0.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=4321abcd-5678-4efa-8abc-1234567890ab;IngestionEndpoint=https://eastus-0.in.applicationinsights.azure.com/", ) - + # Mock initialized state self.manager._initialized = True self.manager._config = old_config - - with patch.object(self.manager, '_reconfigure') as mock_reconfigure: + + with patch.object(self.manager, "_reconfigure") as mock_reconfigure: 
mock_reconfigure.return_value = True result = self.manager.initialize(new_config) - + self.assertTrue(result) mock_reconfigure.assert_called_once_with(new_config) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.is_statsbeat_enabled") def test_initialize_already_initialized_different_config_storage(self, mock_is_enabled): """Test initialize when already initialized with different config.""" mock_is_enabled.return_value = True - + old_config = self._create_valid_config() new_config = StatsbeatConfig( endpoint="https://eastus-1.in.applicationinsights.azure.com/", @@ -480,15 +478,15 @@ def test_initialize_already_initialized_different_config_storage(self, mock_is_e instrumentation_key="different-key", disable_offline_storage=False, ) - + # Mock initialized state self.manager._initialized = True self.manager._config = old_config - - with patch.object(self.manager, '_reconfigure') as mock_reconfigure: + + with patch.object(self.manager, "_reconfigure") as mock_reconfigure: mock_reconfigure.return_value = True result = self.manager.initialize(new_config) - + self.assertTrue(result) mock_reconfigure.assert_called_once_with(new_config) @@ -497,16 +495,16 @@ def test_shutdown_not_initialized(self): result = self.manager.shutdown() self.assertFalse(result) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.set_statsbeat_shutdown') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.set_statsbeat_shutdown") def test_shutdown_success(self, mock_set_shutdown): """Test successful shutdown.""" # Mock initialized state self.manager._initialized = True mock_meter_provider = Mock() self.manager._meter_provider = mock_meter_provider - + result = self.manager.shutdown() - + self.assertTrue(result) self.assertFalse(self.manager._initialized) self.assertIsNone(self.manager._meter_provider) @@ -514,12 +512,12 @@ def test_shutdown_success(self, 
mock_set_shutdown): self.assertIsNone(self.manager._config) mock_meter_provider.shutdown.assert_called_once() mock_set_shutdown.assert_called_once_with(True) - + # Singleton is not cleared upon shutdown manager2 = StatsbeatManager() self.assertIs(self.manager, manager2) - @patch('azure.monitor.opentelemetry.exporter.statsbeat._state.set_statsbeat_shutdown') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._state.set_statsbeat_shutdown") def test_shutdown_meter_provider_exception(self, mock_set_shutdown): """Test shutdown when meter provider raises exception.""" # Mock initialized state @@ -527,9 +525,9 @@ def test_shutdown_meter_provider_exception(self, mock_set_shutdown): mock_meter_provider = Mock() mock_meter_provider.shutdown.side_effect = Exception("Shutdown error") self.manager._meter_provider = mock_meter_provider - + result = self.manager.shutdown() - + self.assertFalse(result) self.assertFalse(self.manager._initialized) self.assertIsNone(self.manager._meter_provider) @@ -539,12 +537,13 @@ def test_shutdown_meter_provider_exception(self, mock_set_shutdown): mock_meter_provider.shutdown.assert_called_once() mock_set_shutdown.assert_not_called() - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader') - @patch('azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter') - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager._StatsbeatMetrics') - def test_reconfigure_success(self, mock_statsbeat_metrics, mock_exporter_class, - mock_reader_class, mock_meter_provider_class): + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") + @patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") + 
@patch("azure.monitor.opentelemetry.exporter.statsbeat._manager._StatsbeatMetrics") + def test_reconfigure_success( + self, mock_statsbeat_metrics, mock_exporter_class, mock_reader_class, mock_meter_provider_class + ): """Test successful reconfiguration.""" # Mock initialized state with old config old_config = self._create_valid_config() @@ -552,7 +551,7 @@ def test_reconfigure_success(self, mock_statsbeat_metrics, mock_exporter_class, self.manager._config = old_config mock_old_meter_provider = Mock() self.manager._meter_provider = mock_old_meter_provider - + # Mock new components mock_exporter = Mock() mock_exporter_class.return_value = mock_exporter @@ -562,25 +561,25 @@ def test_reconfigure_success(self, mock_statsbeat_metrics, mock_exporter_class, mock_meter_provider_class.return_value = mock_meter_provider mock_metrics = Mock() mock_statsbeat_metrics.return_value = mock_metrics - + new_config = StatsbeatConfig( endpoint="https://eastus-1.in.applicationinsights.azure.com/", - region="eastus", + region="eastus", instrumentation_key="new-key", - connection_string="InstrumentationKey=new;IngestionEndpoint=https://new.com/" + connection_string="InstrumentationKey=new;IngestionEndpoint=https://new.com/", ) - + result = self.manager._reconfigure(new_config) - + self.assertTrue(result) self.assertTrue(self.manager._initialized) self.assertEqual(self.manager._config, new_config) - + # Verify old meter provider was shutdown mock_old_meter_provider.force_flush.assert_called_once() mock_old_meter_provider.shutdown.assert_called_once() - @patch('azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider') + @patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") def test_reconfigure_flush_failure(self, mock_meter_provider_class): """Test reconfiguration failure.""" # Mock initialized state @@ -589,18 +588,18 @@ def test_reconfigure_flush_failure(self, mock_meter_provider_class): self.manager._config = old_config mock_old_meter_provider = 
Mock() self.manager._meter_provider = mock_old_meter_provider - + mock_meter_provider_class.force_flush.side_effect = Exception("Reconfigure error") - + new_config = StatsbeatConfig( endpoint="https://eastus-1.in.applicationinsights.azure.com/", region="eastus", instrumentation_key="new-key", - connection_string="InstrumentationKey=new;IngestionEndpoint=https://new.com/" + connection_string="InstrumentationKey=new;IngestionEndpoint=https://new.com/", ) - + result = self.manager._reconfigure(new_config) - + # We still reinitialize the manager state even on flush/shutdown failure self.assertTrue(result) self.assertTrue(self.manager._initialized) @@ -614,9 +613,9 @@ def test_get_current_config_initialized(self): """Test get_current_config when initialized.""" original_config = self._create_valid_config() self.manager._config = original_config - + result = self.manager.get_current_config() - + self.assertIsNotNone(result) if result: # Should be a copy, not the same object @@ -634,9 +633,9 @@ def test_cleanup_with_shutdown(self): self.manager._metrics = Mock() config_mock = Mock() self.manager._config = config_mock - + self.manager._cleanup(shutdown_meter_provider=True) - + self.assertFalse(self.manager._initialized) self.assertIsNone(self.manager._meter_provider) self.assertIsNone(self.manager._metrics) @@ -653,9 +652,9 @@ def test_cleanup_without_shutdown(self): self.manager._metrics = Mock() config_mock = Mock() self.manager._config = config_mock - + self.manager._cleanup(shutdown_meter_provider=False) - + self.assertFalse(self.manager._initialized) self.assertIsNone(self.manager._meter_provider) self.assertIsNone(self.manager._metrics) @@ -673,10 +672,10 @@ def test_cleanup_meter_provider_exception(self): self.manager._metrics = Mock() config_mock = Mock() self.manager._config = config_mock - + # Should not raise exception self.manager._cleanup(shutdown_meter_provider=True) - + self.assertFalse(self.manager._initialized) 
self.assertIsNone(self.manager._meter_provider) self.assertIsNone(self.manager._metrics) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_metrics.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_metrics.py index 2f98c32270a3..6530a2da7c79 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_metrics.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_metrics.py @@ -55,6 +55,7 @@ def func(*_args, **_kwargs): _StatsbeatMetrics_FEATURE_ATTRIBUTES = dict(_StatsbeatMetrics._FEATURE_ATTRIBUTES) _StatsbeatMetrics_INSTRUMENTATION_ATTRIBUTES = dict(_StatsbeatMetrics._INSTRUMENTATION_ATTRIBUTES) + # pylint: disable=protected-access class TestStatsbeatMetrics(unittest.TestCase): @classmethod @@ -86,7 +87,7 @@ def setUp(self): _STATSBEAT_STATE["CUSTOM_EVENTS_FEATURE_SET"] = False _STATSBEAT_STATE["LIVE_METRICS_FEATURE_SET"] = False _STATSBEAT_STATE["CUSTOMER_SDKSTATS_FEATURE_SET"] = False - + _StatsbeatMetrics._COMMON_ATTRIBUTES = dict(_StatsbeatMetrics_COMMON_ATTRS) _StatsbeatMetrics._NETWORK_ATTRIBUTES = dict(_StatsbeatMetrics_NETWORK_ATTRS) _StatsbeatMetrics._FEATURE_ATTRIBUTES = dict(_StatsbeatMetrics_FEATURE_ATTRIBUTES) @@ -950,4 +951,5 @@ def test_shorten_host(self): url = "http://fakehost-5/" self.assertEqual(_shorten_host(url), "fakehost-5") + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_statsbeat.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_statsbeat.py index d86a409e731f..406293cf212c 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_statsbeat.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/statsbeat/test_statsbeat.py @@ -65,14 +65,14 @@ def test_collect_statsbeat_metrics(self, mock_exporter, mock_reader, mock_meter_ exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = "" - + # 
Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() @@ -80,13 +80,13 @@ def test_collect_statsbeat_metrics(self, mock_exporter, mock_reader, mock_meter_ mock_statsbeat_metrics_instance = mock.Mock() mock_statsbeat_metrics.return_value = mock_statsbeat_metrics_instance - + manager = StatsbeatManager() self.assertFalse(manager._initialized) - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - verify manager is initialized self.assertTrue(manager._initialized) self.assertEqual(manager._metrics, mock_statsbeat_metrics_instance) @@ -100,7 +100,7 @@ def test_collect_statsbeat_metrics(self, mock_exporter, mock_reader, mock_meter_ self.assertEqual(config.disable_offline_storage, exporter._disable_offline_storage) self.assertEqual(config.credential, exporter._credential) self.assertEqual(config.distro_version, exporter._distro_version) - + # Verify statsbeat metrics creation metrics = manager._metrics mock_statsbeat_metrics.assert_called_once_with( @@ -110,9 +110,9 @@ def test_collect_statsbeat_metrics(self, mock_exporter, mock_reader, mock_meter_ exporter._disable_offline_storage, 2, False, - exporter._distro_version + exporter._distro_version, ) - + # Verify initialization methods were called flush_mock.assert_called_once() mock_statsbeat_metrics_instance.init_non_initial_metrics.assert_called_once() @@ -122,7 +122,9 @@ def test_collect_statsbeat_metrics(self, mock_exporter, mock_reader, mock_meter_ @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.AzureMonitorMetricExporter") - def 
test_collect_statsbeat_metrics_registers_configuration_callback(self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager, mock_get_configuration_manager): + def test_collect_statsbeat_metrics_registers_configuration_callback( + self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager, mock_get_configuration_manager + ): """Test that collect_statsbeat_metrics registers a configuration callback when initialized successfully.""" # Arrange exporter = mock.Mock() @@ -131,32 +133,32 @@ def test_collect_statsbeat_metrics_registers_configuration_callback(self, mock_e exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = "" - + # Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() mock_meter_provider_instance.force_flush = flush_mock - + manager = mock.Mock() manager.initialize.return_value = True mock_get_manager.return_value = manager mock_config_manager_instance = mock.Mock() mock_get_configuration_manager.return_value = mock_config_manager_instance - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - verify manager is initialized self.assertTrue(manager._initialized) - + # Verify that the configuration manager callback was registered mock_config_manager_instance.register_callback.assert_called_once() @@ -167,28 +169,30 @@ def test_collect_statsbeat_metrics_registers_configuration_callback(self, mock_e @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.evaluate_feature") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.StatsbeatConfig") - def 
test_get_statsbeat_configuration_callback_successful_update(self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature): + def test_get_statsbeat_configuration_callback_successful_update( + self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature + ): """Test that configuration callback successfully updates configuration when statsbeat is initialized.""" # Arrange mock_manager_instance = mock.Mock() mock_get_manager.return_value = mock_manager_instance - + # Create mock current config current_config = mock.Mock() mock_manager_instance.get_current_config.return_value = current_config - + # Create mock updated config updated_config = mock.Mock() mock_statsbeat_config_cls.from_config.return_value = updated_config # mock evaluate_feature to return True (indicating SDK stats should be enabled) mock_evaluate_feature.return_value = True - + settings = {"disable_offline_storage": "true"} - + # Act _statsbeat.get_statsbeat_configuration_callback(settings) - + # Assert mock_evaluate_feature.assert_called_once() mock_get_manager.assert_called_once() @@ -200,28 +204,30 @@ def test_get_statsbeat_configuration_callback_successful_update(self, mock_stats @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.evaluate_feature") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.StatsbeatConfig") - def test_get_statsbeat_configuration_callback_disable_sdkstats(self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature): + def test_get_statsbeat_configuration_callback_disable_sdkstats( + self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature + ): """Test that configuration callback successfully updates configuration when statsbeat is initialized.""" # Arrange mock_manager_instance = mock.Mock() mock_get_manager.return_value = mock_manager_instance - + # Create mock current config 
current_config = mock.Mock() mock_manager_instance.get_current_config.return_value = current_config - + # Create mock updated config updated_config = mock.Mock() mock_statsbeat_config_cls.from_config.return_value = updated_config # mock evaluate_feature to return False (indicating SDK stats should be disabled) mock_evaluate_feature.return_value = False - + settings = {"disable_offline_storage": "true"} - + # Act _statsbeat.get_statsbeat_configuration_callback(settings) - + # Assert mock_evaluate_feature.assert_called_once() mock_get_manager.assert_called_once() @@ -233,7 +239,9 @@ def test_get_statsbeat_configuration_callback_disable_sdkstats(self, mock_statsb @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.evaluate_feature") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.StatsbeatConfig") - def test_get_statsbeat_configuration_callback_not_initialized(self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature): + def test_get_statsbeat_configuration_callback_not_initialized( + self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature + ): """Test that configuration callback handles case when statsbeat is not initialized.""" # Arrange mock_manager_instance = mock.Mock() @@ -241,12 +249,12 @@ def test_get_statsbeat_configuration_callback_not_initialized(self, mock_statsbe mock_manager_instance.get_current_config.return_value = None # mock evaluate_feature to return True (indicating SDK stats should be enabled) mock_evaluate_feature.return_value = True - + settings = {"disable_offline_storage": "true"} - + # Act _statsbeat.get_statsbeat_configuration_callback(settings) - + # Assert mock_get_manager.assert_called_once() mock_manager_instance.get_current_config.assert_called_once() @@ -256,27 +264,29 @@ def test_get_statsbeat_configuration_callback_not_initialized(self, mock_statsbe 
@mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.evaluate_feature") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.StatsbeatConfig") - def test_get_statsbeat_configuration_callback_no_updated_config(self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature): + def test_get_statsbeat_configuration_callback_no_updated_config( + self, mock_statsbeat_config_cls, mock_get_manager, mock_evaluate_feature + ): """Test that configuration callback handles case when StatsbeatConfig.from_config returns None.""" # Arrange mock_manager_instance = mock.Mock() mock_get_manager.return_value = mock_manager_instance - + # Create mock current config current_config = mock.Mock() mock_manager_instance.get_current_config.return_value = current_config # mock evaluate_feature to return True (indicating SDK stats should be enabled) mock_evaluate_feature.return_value = True - + # Mock from_config to return False (indicating no valid update) mock_statsbeat_config_cls.from_config.return_value = False - + settings = {"disable_offline_storage": "invalid_value"} - + # Act _statsbeat.get_statsbeat_configuration_callback(settings) - + # Assert mock_statsbeat_config_cls.from_config.assert_called_once_with(current_config, settings) mock_get_manager.assert_called_once() @@ -287,7 +297,9 @@ def test_get_statsbeat_configuration_callback_no_updated_config(self, mock_stats @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.AzureMonitorMetricExporter") - def test_collect_statsbeat_metrics_no_callback_when_init_fails(self, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics, mock_config_manager_cls): + def 
test_collect_statsbeat_metrics_no_callback_when_init_fails( + self, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics, mock_config_manager_cls + ): """Test that configuration callback is not registered when initialization fails.""" # Arrange exporter = mock.Mock() @@ -296,19 +308,19 @@ def test_collect_statsbeat_metrics_no_callback_when_init_fails(self, mock_export exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = "" - + mock_config_manager_instance = mock.Mock() mock_config_manager_cls.return_value = mock_config_manager_instance - + manager = StatsbeatManager() self.assertFalse(manager._initialized) - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - verify manager is still not initialized self.assertFalse(manager._initialized) - + # Verify that the configuration manager callback was NOT registered mock_config_manager_instance.register_callback.assert_not_called() @@ -317,7 +329,9 @@ def test_collect_statsbeat_metrics_no_callback_when_init_fails(self, mock_export @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.export.metrics._exporter.AzureMonitorMetricExporter") - def test_collect_statsbeat_metrics_exists(self, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics, mock_get_manager): + def test_collect_statsbeat_metrics_exists( + self, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics, mock_get_manager + ): """Test that collect_statsbeat_metrics reuses existing configuration when called multiple times with same config.""" # Arrange exporter = mock.Mock() @@ -326,14 +340,14 @@ def test_collect_statsbeat_metrics_exists(self, mock_exporter, mock_reader, mock exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = 
"" - + # Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() @@ -341,34 +355,34 @@ def test_collect_statsbeat_metrics_exists(self, mock_exporter, mock_reader, mock mock_statsbeat_metrics_instance = mock.Mock() mock_statsbeat_metrics.return_value = mock_statsbeat_metrics_instance - + manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + # Act - Initialize first time _statsbeat.collect_statsbeat_metrics(exporter) first_metrics = manager._metrics self.assertTrue(manager._initialized) self.assertEqual(first_metrics, mock_statsbeat_metrics_instance) - + # Verify first initialization called the mocks self.assertEqual(mock_statsbeat_metrics.call_count, 1) self.assertEqual(mock_meter_provider.call_count, 1) - + # Act - Initialize second time with same config _statsbeat.collect_statsbeat_metrics(exporter) second_metrics = manager._metrics - + # Assert - should reuse existing config since it's the same self.assertTrue(manager._initialized) self.assertIsNotNone(second_metrics) self.assertEqual(first_metrics, second_metrics) - + # Verify mocks were NOT called again since config is the same self.assertEqual(mock_statsbeat_metrics.call_count, 1) # Still only called once - self.assertEqual(mock_meter_provider.call_count, 1) # Still only called once - + self.assertEqual(mock_meter_provider.call_count, 1) # Still only called once + # Verify only one call to flush (from first initialization) flush_mock.assert_called_once() mock_statsbeat_metrics_instance.init_non_initial_metrics.assert_called_once() @@ -386,23 +400,23 @@ def test_collect_statsbeat_metrics_non_eu(self, mock_exporter, mock_reader, mock exporter._disable_offline_storage = False 
exporter._credential = None exporter._distro_version = "" - + # Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() mock_meter_provider_instance.force_flush = flush_mock - + manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + with mock.patch.dict( os.environ, { @@ -411,16 +425,16 @@ def test_collect_statsbeat_metrics_non_eu(self, mock_exporter, mock_reader, mock ): # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert self.assertTrue(manager._initialized) self.assertIsNotNone(manager._metrics) - + # Verify that AzureMonitorMetricExporter was called with the correct connection string mock_exporter.assert_called_once() call_args = mock_exporter.call_args # The connection string should be the non-EU default since the endpoint is non-EU - expected_connection_string = call_args[1]['connection_string'] + expected_connection_string = call_args[1]["connection_string"] self.assertIn(_DEFAULT_NON_EU_STATS_CONNECTION_STRING.split(";")[0].split("=")[1], expected_connection_string) @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @@ -436,23 +450,23 @@ def test_collect_statsbeat_metrics_eu(self, mock_exporter, mock_reader, mock_met exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = "" - + # Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() 
mock_meter_provider_instance.force_flush = flush_mock - + manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + with mock.patch.dict( os.environ, { @@ -461,16 +475,16 @@ def test_collect_statsbeat_metrics_eu(self, mock_exporter, mock_reader, mock_met ): # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert self.assertTrue(manager._initialized) self.assertIsNotNone(manager._metrics) - + # Verify that AzureMonitorMetricExporter was called with the correct connection string mock_exporter.assert_called_once() call_args = mock_exporter.call_args # The connection string should be the EU default since the endpoint is EU - expected_connection_string = call_args[1]['connection_string'] + expected_connection_string = call_args[1]["connection_string"] self.assertIn(_DEFAULT_EU_STATS_CONNECTION_STRING.split(";")[0].split("=")[1], expected_connection_string) @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") @@ -494,10 +508,10 @@ def test_collect_statsbeat_metrics_aad(self, mock_exporter, mock_reader, mock_me manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - Verify _StatsbeatMetrics was called with correct parameters self.assertIsNotNone(manager._metrics) self.assertEqual(manager._metrics._ikey, TEST_IKEY) @@ -524,10 +538,10 @@ def test_collect_statsbeat_metrics_no_aad(self, mock_exporter, mock_reader, mock manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - Verify _StatsbeatMetrics was called with correct parameters self.assertIsNotNone(manager._metrics) self.assertEqual(manager._metrics._ikey, TEST_IKEY) @@ -537,7 +551,9 @@ def test_collect_statsbeat_metrics_no_aad(self, mock_exporter, mock_reader, mock 
@mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.AzureMonitorMetricExporter") - def test_collect_statsbeat_metrics_distro_version(self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager): + def test_collect_statsbeat_metrics_distro_version( + self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager + ): """Test collect_statsbeat_metrics with distribution version.""" # Arrange exporter = mock.Mock() @@ -554,10 +570,10 @@ def test_collect_statsbeat_metrics_distro_version(self, mock_exporter, mock_read manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + # Act _statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - Verify _StatsbeatMetrics was called with correct parameters self.assertIsNotNone(manager._metrics) self.assertEqual(manager._metrics._ikey, TEST_IKEY) @@ -567,7 +583,9 @@ def test_collect_statsbeat_metrics_distro_version(self, mock_exporter, mock_read @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.MeterProvider") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.AzureMonitorMetricExporter") - def test_collect_statsbeat_metrics_local_storage(self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager): + def test_collect_statsbeat_metrics_local_storage( + self, mock_exporter, mock_reader, mock_meter_provider, mock_get_manager + ): """Test collect_statsbeat_metrics with local storage.""" # Arrange exporter = mock.Mock() @@ -584,10 +602,10 @@ def test_collect_statsbeat_metrics_local_storage(self, mock_exporter, mock_reade manager = StatsbeatManager() self.assertFalse(manager._initialized) mock_get_manager.return_value = manager - + # Act 
_statsbeat.collect_statsbeat_metrics(exporter) - + # Assert - Verify _StatsbeatMetrics was called with correct parameters self.assertIsNotNone(manager._metrics) self.assertEqual(manager._metrics._ikey, TEST_IKEY) @@ -598,7 +616,9 @@ def test_collect_statsbeat_metrics_local_storage(self, mock_exporter, mock_reade @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._manager.PeriodicExportingMetricReader") @mock.patch("azure.monitor.opentelemetry.exporter.AzureMonitorMetricExporter") @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.get_statsbeat_manager") - def test_shutdown_statsbeat_metrics(self, mock_get_manager, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics): + def test_shutdown_statsbeat_metrics( + self, mock_get_manager, mock_exporter, mock_reader, mock_meter_provider, mock_statsbeat_metrics + ): """Test shutdown_statsbeat_metrics after initialization.""" # Arrange - First initialize statsbeat exporter = mock.Mock() @@ -608,14 +628,14 @@ def test_shutdown_statsbeat_metrics(self, mock_get_manager, mock_exporter, mock_ exporter._disable_offline_storage = False exporter._credential = None exporter._distro_version = "" - + # Set up mock returns mock_exporter_instance = mock.Mock() mock_exporter.return_value = mock_exporter_instance - + mock_reader_instance = mock.Mock() mock_reader.return_value = mock_reader_instance - + mock_meter_provider_instance = mock.Mock() mock_meter_provider.return_value = mock_meter_provider_instance flush_mock = mock.Mock() @@ -625,21 +645,21 @@ def test_shutdown_statsbeat_metrics(self, mock_get_manager, mock_exporter, mock_ mock_statsbeat_metrics_instance = mock.Mock() mock_statsbeat_metrics.return_value = mock_statsbeat_metrics_instance - + # Create a real manager instance for initialization manager = StatsbeatManager() # Mock get_statsbeat_manager to return our initialized manager mock_get_manager.return_value = manager - + # Act - Initialize first 
_statsbeat.collect_statsbeat_metrics(exporter) self.assertTrue(manager.is_initialized()) self.assertFalse(_STATSBEAT_STATE["SHUTDOWN"]) - + # Act - Test shutdown result = _statsbeat.shutdown_statsbeat_metrics() - + # Assert self.assertTrue(result) self.assertFalse(manager.is_initialized()) @@ -652,16 +672,17 @@ def test_shutdown_statsbeat_metrics_not_initialized(self, mock_get_manager): # Arrange manager = StatsbeatManager() self.assertFalse(manager.is_initialized()) - + # Mock get_statsbeat_manager to return our uninitialized manager mock_get_manager.return_value = manager - + # Act - Test shutdown when not initialized result = _statsbeat.shutdown_statsbeat_metrics() - + # Assert self.assertFalse(result) # Should return False when not initialized self.assertFalse(manager.is_initialized()) mock_get_manager.assert_called_once() + # cSpell:enable diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_customer_sdkstats.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_customer_sdkstats.py index 69ac924e4212..bf8bdc4f37be 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_customer_sdkstats.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_customer_sdkstats.py @@ -13,8 +13,8 @@ BaseExporter, ExportResult, ) -from azure.monitor.opentelemetry.exporter._generated import AzureMonitorClient -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter import AzureMonitorExporterClient +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( TelemetryItem, TrackResponse, TelemetryErrorDetails, @@ -38,8 +38,10 @@ track_dropped_items_from_storage, ) + class MockResponse: """Mock response object for HTTP requests""" + def __init__(self, status_code, content): self.status_code = status_code self.content = content @@ -49,6 +51,19 @@ def __init__(self, status_code, content): 
self.raw.enforce_content_length = True self.reason = "Mock Reason" # Add the reason attribute self.url = "http://mock-url.com" # Add the url attribute + self._content_consumed = False + + def iter_content(self, chunk_size=1): + content_bytes = self.content.encode() if isinstance(self.content, str) else self.content + for i in range(0, len(content_bytes), chunk_size): + yield content_bytes[i : i + chunk_size] + self._content_consumed = True + + def iter_bytes(self, chunk_size=None): + return self.iter_content(chunk_size or 1) + + def close(self): + pass class TestBaseExporterCustomerSdkStats(unittest.TestCase): @@ -57,34 +72,28 @@ class TestBaseExporterCustomerSdkStats(unittest.TestCase): @classmethod def setUpClass(cls): """Set up class-level resources including a single customer stats manager""" - from azure.monitor.opentelemetry.exporter._generated.models import TelemetryEventData, MonitorBase - + from azure.monitor.opentelemetry.exporter._generated.exporter.models import TelemetryEventData, MonitorBase + os.environ.pop("APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW", None) os.environ["APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW"] = "true" - + # Patch _should_collect_customer_sdkstats instance method to always return True for all tests cls._should_collect_patch = mock.patch( - 'azure.monitor.opentelemetry.exporter.export._base.BaseExporter._should_collect_customer_sdkstats', - return_value=True + "azure.monitor.opentelemetry.exporter.export._base.BaseExporter._should_collect_customer_sdkstats", + return_value=True, ) cls._should_collect_patch.start() - + # Patch collect_customer_sdkstats to prevent actual initialization cls._collect_customer_sdkstats_patch = mock.patch( - 'azure.monitor.opentelemetry.exporter.statsbeat.customer.collect_customer_sdkstats' + "azure.monitor.opentelemetry.exporter.statsbeat.customer.collect_customer_sdkstats" ) cls._collect_customer_sdkstats_patch.start() - + # Create reusable test data structure for TelemetryItem - base_data = 
TelemetryEventData( - name="test_event", - properties={"test_property": "test_value"} - ) - monitor_base = MonitorBase( - base_type="EventData", - base_data=base_data - ) - + base_data = TelemetryEventData(name="test_event", properties={"test_property": "test_value"}) + monitor_base = MonitorBase(base_type="EventData", base_data=base_data) + cls._envelopes_to_export = [ TelemetryItem( name="test_envelope", @@ -101,27 +110,27 @@ def tearDownClass(cls): # Stop the patches cls._should_collect_patch.stop() cls._collect_customer_sdkstats_patch.stop() - + # Clean up environment os.environ.pop("APPLICATIONINSIGHTS_SDKSTATS_ENABLED_PREVIEW", None) def _create_exporter_with_customer_sdkstats_enabled(self, disable_offline_storage=True): """Helper method to create an exporter with customer sdkstats enabled""" - + exporter = BaseExporter( connection_string="InstrumentationKey=12345678-1234-5678-abcd-12345678abcd", disable_offline_storage=disable_offline_storage, ) - + return exporter @mock.patch("azure.monitor.opentelemetry.exporter.export._base.track_successful_items") def test_transmit_200_customer_sdkstats_track_successful_items(self, track_successful_mock): """Test that track_successful_items is called on 200 success response""" - + exporter = self._create_exporter_with_customer_sdkstats_enabled() - - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_response = TrackResponse( items_received=1, items_accepted=1, @@ -129,7 +138,7 @@ def test_transmit_200_customer_sdkstats_track_successful_items(self, track_succe ) track_mock.return_value = track_response result = exporter._transmit(self._envelopes_to_export) - + track_successful_mock.assert_called_once_with(self._envelopes_to_export) self.assertEqual(result, ExportResult.SUCCESS) @@ -137,7 +146,7 @@ def test_transmit_200_customer_sdkstats_track_successful_items(self, track_succe def 
test_transmit_206_customer_sdkstats_track_retry_items(self, track_retry_mock): """Test that _track_retry_items is called on 206 partial success with retryable errors""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_mock.return_value = TrackResponse( items_received=2, items_accepted=1, @@ -155,7 +164,7 @@ def test_transmit_206_customer_sdkstats_track_retry_items(self, track_retry_mock def test_transmit_206_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that _track_dropped_items is called on 206 partial success with non-retryable errors""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_mock.return_value = TrackResponse( items_received=2, items_accepted=1, @@ -168,7 +177,6 @@ def test_transmit_206_customer_sdkstats_track_dropped_items(self, track_dropped_ track_dropped_mock.assert_called_once() self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) - @mock.patch("azure.monitor.opentelemetry.exporter.export._base.track_retry_items") def test_transmit_retryable_http_error_customer_sdkstats_track_retry_items(self, track_retry_mock): """Test that _track_retry_items is called on retryable HTTP errors (e.g., 408, 502, 503, 504)""" @@ -189,7 +197,7 @@ def test_transmit_throttle_http_error_customer_sdkstats_track_dropped_items(self self.assertTrue(exporter._should_collect_customer_sdkstats()) # Simulate a throttle HTTP error using HttpResponseError - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: error_response = mock.Mock() error_response.status_code = 402 # Use actual throttle code track_mock.side_effect = 
HttpResponseError("Throttling error", response=error_response) @@ -202,8 +210,9 @@ def test_transmit_throttle_http_error_customer_sdkstats_track_dropped_items(self def test_transmit_invalid_http_error_customer_sdkstats_track_dropped_items_and_shutdown(self, track_dropped_mock): """Test that _track_dropped_items is called and customer sdkstats is shutdown on invalid HTTP errors (e.g., 400)""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch("requests.Session.request") as request_mock, \ - mock.patch("azure.monitor.opentelemetry.exporter.statsbeat.customer.shutdown_customer_sdkstats_metrics") as shutdown_mock: + with mock.patch("requests.Session.request") as request_mock, mock.patch( + "azure.monitor.opentelemetry.exporter.statsbeat.customer.shutdown_customer_sdkstats_metrics" + ) as shutdown_mock: request_mock.return_value = MockResponse(400, "{}") result = exporter._transmit(self._envelopes_to_export) @@ -215,7 +224,9 @@ def test_transmit_invalid_http_error_customer_sdkstats_track_dropped_items_and_s def test_transmit_service_request_error_customer_sdkstats_track_retry_items(self, track_retry_mock): """Test that _track_retry_items is called on ServiceRequestError""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch.object(AzureMonitorClient, "track", side_effect=ServiceRequestError("Connection error")): + with mock.patch.object( + AzureMonitorExporterClient, "track", side_effect=ServiceRequestError("Connection error") + ): result = exporter._transmit(self._envelopes_to_export) track_retry_mock.assert_called_once() @@ -225,7 +236,9 @@ def test_transmit_service_request_error_customer_sdkstats_track_retry_items(self def test_transmit_general_exception_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that _track_dropped_items is called on general exceptions""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch.object(AzureMonitorClient, "track", 
side_effect=Exception(_exception_categories.CLIENT_EXCEPTION.value)): + with mock.patch.object( + AzureMonitorExporterClient, "track", side_effect=Exception(_exception_categories.CLIENT_EXCEPTION.value) + ): result = exporter._transmit(self._envelopes_to_export) track_dropped_mock.assert_called_once() @@ -233,12 +246,11 @@ def test_transmit_general_exception_customer_sdkstats_track_dropped_items(self, # Just make sure the function was called self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) - @mock.patch("azure.monitor.opentelemetry.exporter.export._base.track_dropped_items") def test_transmit_storage_disabled_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that _track_dropped_items is called when offline storage is disabled and items would be retried""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_mock.return_value = TrackResponse( items_received=1, items_accepted=0, @@ -253,29 +265,30 @@ def test_transmit_storage_disabled_customer_sdkstats_track_dropped_items(self, t # No need to verify specific arguments as the function signature has changed self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) - - @mock.patch('azure.monitor.opentelemetry.exporter.export._base.track_dropped_items') + @mock.patch("azure.monitor.opentelemetry.exporter.export._base.track_dropped_items") @mock.patch("azure.monitor.opentelemetry.exporter.export._base.track_dropped_items_from_storage") - def test_transmit_from_storage_customer_sdkstats_track_dropped_items_from_storage(self, track_dropped_storage_mock, track_dropped_items_mock): + def test_transmit_from_storage_customer_sdkstats_track_dropped_items_from_storage( + self, track_dropped_storage_mock, track_dropped_items_mock + ): """Test that _track_dropped_items_from_storage is called during storage operations""" from 
azure.monitor.opentelemetry.exporter._storage import StorageExportResult - + exporter = self._create_exporter_with_customer_sdkstats_enabled(disable_offline_storage=False) - + # Set up side_effect for track_dropped_items_from_storage to match the new signature def track_dropped_storage_side_effect(result_from_storage_put, envelopes): # Import here to avoid import error # Using imported track_dropped_items_from_storage # Call the real function which will use our mocked track_dropped_items track_dropped_items_from_storage(result_from_storage_put, envelopes) - + track_dropped_storage_mock.side_effect = track_dropped_storage_side_effect - + # Mock _track_dropped_items to simulate a successful call track_dropped_items_mock.return_value = None - + # Simulate a scenario where storage operations would happen - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_mock.return_value = TrackResponse( items_received=1, items_accepted=0, @@ -283,15 +296,16 @@ def track_dropped_storage_side_effect(result_from_storage_put, envelopes): TelemetryErrorDetails(index=0, status_code=500, message="should retry"), ], ) - + # Mock the storage to simulate storage operations - simulate storage error - with mock.patch.object(exporter.storage, "put", return_value="storage_error") as put_mock, \ - mock.patch.object(exporter.storage, "gets", return_value=["stored_envelope"]) as gets_mock: + with mock.patch.object( + exporter.storage, "put", return_value="storage_error" + ) as put_mock, mock.patch.object(exporter.storage, "gets", return_value=["stored_envelope"]) as gets_mock: # We don't need to mock StorageExportResult anymore result = exporter._transmit(self._envelopes_to_export) track_dropped_storage_mock.assert_called_once() - + # No need to verify specific arguments as the function signature has changed self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) # Storage makes it NOT_RETRYABLE 
@@ -299,9 +313,9 @@ def track_dropped_storage_side_effect(result_from_storage_put, envelopes): def test_transmit_redirect_parsing_error_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that track_dropped_items is called on redirect errors with invalid headers/parsing errors""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - + # Simulate a redirect HTTP error using HttpResponseError without proper headers - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: error_response = mock.Mock() error_response.status_code = 307 # Redirect status code error_response.headers = None # No headers to cause parsing error @@ -315,12 +329,12 @@ def test_transmit_redirect_parsing_error_customer_sdkstats_track_dropped_items(s def test_transmit_circular_redirect_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that track_dropped_items is called on circular redirect errors""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - + # Mock the consecutive redirects counter to simulate exceeding max redirects exporter._consecutive_redirects = 10 # Set to a high value to simulate circular redirects - + # Simulate redirect responses that would cause circular redirects - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: error_response = mock.Mock() error_response.status_code = 307 # Redirect status code error_response.headers = {"location": "https://example.com/redirect"} @@ -336,7 +350,7 @@ def test_transmit_403_forbidden_error_customer_sdkstats_track_retry_items(self, exporter = self._create_exporter_with_customer_sdkstats_enabled() # Simulate a 403 Forbidden HTTP error using HttpResponseError - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as 
track_mock: error_response = mock.Mock() error_response.status_code = 403 # Forbidden code track_mock.side_effect = HttpResponseError("Forbidden error", response=error_response) @@ -351,7 +365,7 @@ def test_transmit_401_unauthorized_error_customer_sdkstats_track_retry_items(sel exporter = self._create_exporter_with_customer_sdkstats_enabled() # Simulate a 401 Unauthorized HTTP error using HttpResponseError - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: error_response = mock.Mock() error_response.status_code = 401 # Unauthorized code track_mock.side_effect = HttpResponseError("Unauthorized error", response=error_response) @@ -364,9 +378,9 @@ def test_transmit_401_unauthorized_error_customer_sdkstats_track_retry_items(sel def test_transmit_redirect_invalid_location_header_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that track_dropped_items is called when redirect has invalid location header""" exporter = self._create_exporter_with_customer_sdkstats_enabled() - + # Simulate a redirect HTTP error with invalid location header - with mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: error_response = mock.Mock() error_response.status_code = 307 # Redirect status code error_response.headers = {"location": "invalid-url"} # Invalid URL format @@ -380,22 +394,25 @@ def test_transmit_redirect_invalid_location_header_customer_sdkstats_track_dropp def test_transmit_from_storage_failure_customer_sdkstats_track_dropped_items(self, track_dropped_mock): """Test that track_dropped_items is called when _transmit_from_storage operations fail""" exporter = self._create_exporter_with_customer_sdkstats_enabled(disable_offline_storage=False) - + # Mock storage operations to simulate a successful initial transmit that triggers storage operations - with 
mock.patch.object(AzureMonitorClient, "track") as track_mock: + with mock.patch.object(AzureMonitorExporterClient, "track") as track_mock: track_response = TrackResponse( items_received=1, items_accepted=1, errors=[], ) track_mock.return_value = track_response - + # Mock _transmit_from_storage to raise an exception - with mock.patch.object(exporter, '_transmit_from_storage', side_effect=Exception("Storage operation failed")): + with mock.patch.object( + exporter, "_transmit_from_storage", side_effect=Exception("Storage operation failed") + ): result = exporter._transmit(self._envelopes_to_export) # Should still succeed for the main transmission self.assertEqual(result, ExportResult.SUCCESS) + if __name__ == "__main__": unittest.main() diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py index b6737d2d983b..52f5c95c1759 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py @@ -21,8 +21,8 @@ ) from azure.monitor.opentelemetry.exporter._storage import StorageExportResult from azure.monitor.opentelemetry.exporter.statsbeat._state import ( - _REQUESTS_MAP, - _STATSBEAT_STATE, + _REQUESTS_MAP, + _STATSBEAT_STATE, ) from azure.monitor.opentelemetry.exporter.export.metrics._exporter import AzureMonitorMetricExporter from azure.monitor.opentelemetry.exporter.export.trace._exporter import AzureMonitorTraceExporter @@ -38,8 +38,8 @@ _UNKNOWN, _exception_categories, ) -from azure.monitor.opentelemetry.exporter._generated import AzureMonitorClient -from azure.monitor.opentelemetry.exporter._generated.models import ( +from azure.monitor.opentelemetry.exporter._generated.exporter import AzureMonitorExporterClient +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ( MessageData, MetricDataPoint, MetricsData, @@ -101,13 
+101,15 @@ def tearDownClass(cls): def setUp(self) -> None: _REQUESTS_MAP.clear() _STATSBEAT_STATE.clear() - _STATSBEAT_STATE.update({ - "INITIAL_FAILURE_COUNT": 0, - "INITIAL_SUCCESS": False, - "SHUTDOWN": False, - "CUSTOM_EVENTS_FEATURE_SET": False, - "LIVE_METRICS_FEATURE_SET": False, - }) + _STATSBEAT_STATE.update( + { + "INITIAL_FAILURE_COUNT": 0, + "INITIAL_SUCCESS": False, + "SHUTDOWN": False, + "CUSTOM_EVENTS_FEATURE_SET": False, + "LIVE_METRICS_FEATURE_SET": False, + } + ) def tearDown(self): clean_folder(self._base.storage._path) @@ -177,10 +179,7 @@ def test_constructor_no_storage_directory(self, mock_get_temp_dir): self.assertEqual(base._api_version, "2021-02-10_Preview") self.assertEqual(base._storage_min_retry_interval, 100) storage_directory = _get_storage_directory(instrumentation_key="4321abcd-5678-4efa-8abc-1234567890ab") - self.assertEqual( - base._storage_directory, - storage_directory - ) + self.assertEqual(base._storage_directory, storage_directory) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.tempfile.gettempdir") def test_constructor_no_storage_directory_and_invalid_instrumentation_key(self, mock_get_temp_dir): @@ -212,10 +211,7 @@ def test_constructor_no_storage_directory_and_invalid_instrumentation_key(self, self.assertEqual(base._api_version, "2021-02-10_Preview") self.assertEqual(base._storage_min_retry_interval, 100) storage_directory = _get_storage_directory(instrumentation_key="") - self.assertNotEqual( - base._storage_directory, - storage_directory - ) + self.assertNotEqual(base._storage_directory, storage_directory) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.getpass.getuser") @mock.patch("azure.monitor.opentelemetry.exporter.export._base.tempfile.gettempdir") @@ -249,10 +245,7 @@ def test_constructor_no_storage_directory_and_invalid_user_details(self, mock_ge self.assertEqual(base._api_version, "2021-02-10_Preview") self.assertEqual(base._storage_min_retry_interval, 100) storage_directory = 
_get_storage_directory(instrumentation_key="4321abcd-5678-4efa-8abc-1234567890ab") - self.assertEqual( - base._storage_directory, - storage_directory - ) + self.assertEqual(base._storage_directory, storage_directory) mock_get_user.assert_called() @mock.patch("azure.monitor.opentelemetry.exporter.export._base.tempfile.gettempdir") @@ -320,19 +313,15 @@ def test_constructor_disable_offline_storage_with_storage_directory(self, mock_g # STORAGE TESTS # ======================================================================== - @mock.patch("azure.monitor.opentelemetry.exporter.export._base._format_storage_telemetry_item") - @mock.patch.object(TelemetryItem, "from_dict") - def test_transmit_from_storage_success(self, dict_patch, format_patch): + def test_transmit_from_storage_success(self): exporter = BaseExporter() exporter.storage = mock.Mock() blob_mock = mock.Mock() blob_mock.lease.return_value = True envelope_mock = {"name": "test", "time": "time"} blob_mock.get.return_value = [envelope_mock] - dict_patch.return_value = {"name": "test", "time": "time"} - format_patch.return_value = envelope_mock exporter.storage.gets.return_value = [blob_mock] - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=1, items_accepted=1, @@ -343,20 +332,16 @@ def test_transmit_from_storage_success(self, dict_patch, format_patch): blob_mock.lease.assert_called_once() blob_mock.delete.assert_called_once() - @mock.patch("azure.monitor.opentelemetry.exporter.export._base._format_storage_telemetry_item") - @mock.patch.object(TelemetryItem, "from_dict") - def test_transmit_from_storage_store_again(self, dict_patch, format_patch): + def test_transmit_from_storage_store_again(self): exporter = BaseExporter() exporter.storage = mock.Mock() blob_mock = mock.Mock() blob_mock.lease.return_value = True envelope_mock = {"name": "test", "time": "time"} 
blob_mock.get.return_value = [envelope_mock] - dict_patch.return_value = {"name": "test", "time": "time"} - format_patch.return_value = envelope_mock exporter.storage.gets.return_value = [blob_mock] with mock.patch("azure.monitor.opentelemetry.exporter.export._base._is_retryable_code"): - with mock.patch.object(AzureMonitorClient, "track", throw(HttpResponseError)): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(HttpResponseError)): exporter._transmit_from_storage() exporter.storage.gets.assert_called_once() blob_mock.lease.assert_called() @@ -382,15 +367,15 @@ def test_transmit_from_storage_blob_get_returns_none(self): exporter.storage = mock.Mock() blob_mock = mock.Mock() blob_mock.lease.return_value = True - + blob_mock.get.return_value = None exporter.storage.gets.return_value = [blob_mock] transmit_mock = mock.Mock() exporter._transmit = transmit_mock - + # This should not raise a TypeError exporter._transmit_from_storage() - + # Verify that the blob was leased and deleted (since data was None) exporter.storage.gets.assert_called_once() blob_mock.lease.assert_called_once() @@ -398,7 +383,6 @@ def test_transmit_from_storage_blob_get_returns_none(self): blob_mock.delete.assert_called_once() # Corrupted blob should be deleted transmit_mock.assert_not_called() # No transmission should occur - def test_format_storage_telemetry_item(self): time = datetime.now() base = MonitorBase(base_type="", base_data=None) @@ -425,7 +409,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "MessageData") @@ -443,7 +427,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = 
ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "EventData") @@ -465,7 +449,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "ExceptionData") @@ -503,7 +487,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "MetricData") @@ -539,7 +523,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "RemoteDependencyData") @@ -563,7 +547,7 @@ def test_format_storage_telemetry_item(self): ti.data = base # Format is called on custom serialized TelemetryItem - converted_ti = ti.from_dict(ti.as_dict()) + converted_ti = TelemetryItem(ti.as_dict()) format_ti = _format_storage_telemetry_item(converted_ti) self.assertTrue(validate_telemetry_item(format_ti, ti)) self.assertEqual(format_ti.data.base_type, "RequestData") @@ -573,43 +557,43 @@ def test_handle_transmit_from_storage_success_result(self): """Test that when storage.put() returns StorageExportResult.LOCAL_FILE_BLOB_SUCCESS, 
the method continues without any special handling.""" exporter = BaseExporter(disable_offline_storage=False) - + # Mock storage.put() to return success exporter.storage = mock.Mock() exporter.storage.put.return_value = StorageExportResult.LOCAL_FILE_BLOB_SUCCESS - + test_envelopes = [TelemetryItem(name="test", time=datetime.now())] serialized_envelopes = [envelope.as_dict() for envelope in test_envelopes] exporter._handle_transmit_from_storage(test_envelopes, ExportResult.FAILED_RETRYABLE) - + # Verify storage.put was called with the serialized envelopes exporter.storage.put.assert_called_once_with(serialized_envelopes) def test_handle_transmit_from_storage_success_triggers_transmit(self): exporter = BaseExporter(disable_offline_storage=False) - - with mock.patch.object(exporter, '_transmit_from_storage') as mock_transmit_from_storage: + + with mock.patch.object(exporter, "_transmit_from_storage") as mock_transmit_from_storage: test_envelopes = [TelemetryItem(name="test", time=datetime.now())] - + exporter._handle_transmit_from_storage(test_envelopes, ExportResult.SUCCESS) - + mock_transmit_from_storage.assert_called_once() def test_handle_transmit_from_storage_no_storage(self): exporter = BaseExporter(disable_offline_storage=True) - + self.assertIsNone(exporter.storage) - + test_envelopes = [TelemetryItem(name="test", time=datetime.now())] - + exporter._handle_transmit_from_storage(test_envelopes, ExportResult.SUCCESS) exporter._handle_transmit_from_storage(test_envelopes, ExportResult.FAILED_RETRYABLE) def test_transmit_from_storage_no_storage(self): exporter = BaseExporter(disable_offline_storage=True) - + self.assertIsNone(exporter.storage) - + exporter._transmit_from_storage() # ======================================================================== @@ -619,14 +603,14 @@ def test_transmit_from_storage_no_storage(self): def test_transmit_http_error_retryable(self): with mock.patch("azure.monitor.opentelemetry.exporter.export._base._is_retryable_code") as m: 
m.return_value = True - with mock.patch.object(AzureMonitorClient, "track", throw(HttpResponseError)): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(HttpResponseError)): result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_RETRYABLE) def test_transmit_http_error_not_retryable(self): with mock.patch("azure.monitor.opentelemetry.exporter.export._base._is_retryable_code") as m: m.return_value = False - with mock.patch.object(AzureMonitorClient, "track", throw(HttpResponseError)): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(HttpResponseError)): result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -638,7 +622,7 @@ def test_transmit_http_error_redirect(self): self._base.client._config.redirect_policy.max_redirects = 2 prev_host = self._base.client._config.host error = HttpResponseError(response=response) - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.side_effect = error result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -653,7 +637,7 @@ def test_transmit_http_error_redirect_missing_headers(self): response.headers = None error = HttpResponseError(response=response) prev_host = self._base.client._config.host - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.side_effect = error result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -666,7 +650,7 @@ def test_transmit_http_error_redirect_invalid_location_header(self): response.headers = {"location": "123"} error = HttpResponseError(response=response) prev_host = self._base.client._config.host - with mock.patch.object(AzureMonitorClient, "track") as post: + 
with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.side_effect = error result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -674,12 +658,12 @@ def test_transmit_http_error_redirect_invalid_location_header(self): self.assertEqual(self._base.client._config.host, prev_host) def test_transmit_request_error(self): - with mock.patch.object(AzureMonitorClient, "track", throw(ServiceRequestError, message="error")): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(ServiceRequestError, message="error")): result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_RETRYABLE) def test_transmission_200(self): - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=1, items_accepted=1, @@ -697,7 +681,7 @@ def test_transmission_206_retry(self): TelemetryItem(name="Test", time=datetime.now()), test_envelope, ] - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=3, items_accepted=1, @@ -723,7 +707,7 @@ def test_transmission_206_no_retry(self): TelemetryItem(name="Test", time=datetime.now()), test_envelope, ] - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=3, items_accepted=2, @@ -811,7 +795,7 @@ def test_transmission_empty(self): @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.collect_statsbeat_metrics") def test_transmit_request_error_statsbeat(self, stats_mock): exporter = BaseExporter(disable_offline_storage=True) - with mock.patch.object(AzureMonitorClient, "track", throw(ServiceRequestError, message="error")): + with 
mock.patch.object(AzureMonitorExporterClient, "track", throw(ServiceRequestError, message="error")): result = exporter._transmit(self._envelopes_to_export) stats_mock.assert_called_once() self.assertEqual(len(_REQUESTS_MAP), 3) @@ -821,7 +805,7 @@ def test_transmit_request_error_statsbeat(self, stats_mock): self.assertEqual(result, ExportResult.FAILED_RETRYABLE) def test_transmit_request_exception(self): - with mock.patch.object(AzureMonitorClient, "track", throw(Exception)): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(Exception)): result = self._base._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -835,7 +819,7 @@ def test_transmit_request_exception(self): @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.collect_statsbeat_metrics") def test_transmit_request_exception_statsbeat(self, stats_mock): exporter = BaseExporter(disable_offline_storage=True) - with mock.patch.object(AzureMonitorClient, "track", throw(Exception)): + with mock.patch.object(AzureMonitorExporterClient, "track", throw(Exception)): result = exporter._transmit(self._envelopes_to_export) stats_mock.assert_called_once() self.assertEqual(len(_REQUESTS_MAP), 3) @@ -853,7 +837,7 @@ def test_transmit_request_exception_statsbeat(self, stats_mock): @mock.patch("azure.monitor.opentelemetry.exporter.statsbeat._statsbeat.collect_statsbeat_metrics") def test_statsbeat_200(self, stats_mock): exporter = BaseExporter(disable_offline_storage=True) - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=1, items_accepted=1, @@ -883,7 +867,7 @@ def test_statsbeat_206_retry(self, stats_mock): TelemetryItem(name="Test", time=datetime.now()), test_envelope, ] - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: 
post.return_value = TrackResponse( items_received=3, items_accepted=1, @@ -900,7 +884,7 @@ def test_statsbeat_206_retry(self, stats_mock): stats_mock.assert_called_once() # We do not record any network statsbeat for 206 status code self.assertEqual(len(_REQUESTS_MAP), 2) - self.assertIsNone(_REQUESTS_MAP.get('retry')) + self.assertIsNone(_REQUESTS_MAP.get("retry")) self.assertEqual(_REQUESTS_MAP["count"], 1) self.assertIsNotNone(_REQUESTS_MAP[_REQ_DURATION_NAME[1]]) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) @@ -921,7 +905,7 @@ def test_statsbeat_206_no_retry(self, stats_mock): TelemetryItem(name="Test", time=datetime.now()), test_envelope, ] - with mock.patch.object(AzureMonitorClient, "track") as post: + with mock.patch.object(AzureMonitorExporterClient, "track") as post: post.return_value = TrackResponse( items_received=3, items_accepted=2, @@ -1143,9 +1127,7 @@ def test_exporter_credential_audience(self, mock_add_credential_policy, mock_get authentication_policy=TEST_AUTH_POLICY, ) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "TEST_CREDENTIAL_ENV_VAR" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "TEST_CREDENTIAL_ENV_VAR"}) @mock.patch("azure.monitor.opentelemetry.exporter.export._base._get_authentication_credential") @mock.patch("azure.monitor.opentelemetry.exporter.export._base._get_auth_policy") def test_credential_env_var_and_arg(self, mock_add_credential_policy, mock_get_authentication_credential): @@ -1155,16 +1137,14 @@ def test_credential_env_var_and_arg(self, mock_add_credential_policy, mock_get_a mock_add_credential_policy.assert_called_once_with("TEST_CREDENTIAL_ENV_VAR", TEST_AUTH_POLICY, None) mock_get_authentication_credential.assert_called_once_with(authentication_policy=TEST_AUTH_POLICY) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "TEST_CREDENTIAL_ENV_VAR" - }) + @mock.patch.dict("os.environ", 
{"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "TEST_CREDENTIAL_ENV_VAR"}) @mock.patch("azure.monitor.opentelemetry.exporter.export._base._get_authentication_credential") def test_statsbeat_no_credential(self, mock_get_authentication_credential): mock_get_authentication_credential.return_value = "TEST_CREDENTIAL_ENV_VAR" statsbeat_exporter = AzureMonitorMetricExporter(is_sdkstats=True) self.assertIsNone(statsbeat_exporter._credential) mock_get_authentication_credential.assert_not_called() - + def test_get_auth_policy(self): class TestCredential: def get_token(self): @@ -1197,9 +1177,7 @@ def get_token(): self.assertEqual(result._credential, credential) self.assertEqual(result._scopes, ("test_audience/.default",)) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD"}) def test_get_authentication_credential_arg(self): TEST_CREDENTIAL = "TEST_CREDENTIAL" result = _get_authentication_credential( @@ -1207,89 +1185,73 @@ def test_get_authentication_credential_arg(self): ) self.assertEqual(result, TEST_CREDENTIAL) - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD"}) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.logger") @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_system_assigned(self, mock_managed_identity, mock_logger): MOCK_MANAGED_IDENTITY_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CREDENTIAL - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") mock_logger.assert_not_called() self.assertEqual(result, MOCK_MANAGED_IDENTITY_CREDENTIAL) 
mock_managed_identity.assert_called_once_with() - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD;ClientId=TEST_CLIENT_ID" - }) + @mock.patch.dict( + "os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD;ClientId=TEST_CLIENT_ID"} + ) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.logger") @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_client_id(self, mock_managed_identity, mock_logger): MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") mock_logger.assert_not_called() self.assertEqual(result, MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL) mock_managed_identity.assert_called_once_with(client_id="TEST_CLIENT_ID") - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD;ClientId=TEST_CLIENT_ID=bar" - }) + @mock.patch.dict( + "os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD;ClientId=TEST_CLIENT_ID=bar"} + ) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.logger") @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_misformatted(self, mock_managed_identity, mock_logger): # Even a single misformatted pair means Entra ID auth is skipped. 
MOCK_MANAGED_IDENTITY_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CREDENTIAL - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") mock_logger.error.assert_called_once() self.assertIsNone(result) mock_managed_identity.assert_not_called() - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "ClientId=TEST_CLIENT_ID" - }) + @mock.patch.dict("os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "ClientId=TEST_CLIENT_ID"}) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_no_auth(self, mock_managed_identity): MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") self.assertIsNone(result) mock_managed_identity.assert_not_called() - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=foobar;ClientId=TEST_CLIENT_ID" - }) + @mock.patch.dict( + "os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=foobar;ClientId=TEST_CLIENT_ID"} + ) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_no_aad(self, mock_managed_identity): MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") self.assertIsNone(result) mock_managed_identity.assert_not_called() - @mock.patch.dict("os.environ", { - "APPLICATIONINSIGHTS_AUTHENTICATION_STRING": 
"Authorization=AAD;ClientId=TEST_CLIENT_ID" - }) + @mock.patch.dict( + "os.environ", {"APPLICATIONINSIGHTS_AUTHENTICATION_STRING": "Authorization=AAD;ClientId=TEST_CLIENT_ID"} + ) @mock.patch("azure.monitor.opentelemetry.exporter.export._base.ManagedIdentityCredential") def test_get_authentication_credential_error(self, mock_managed_identity): MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL = "MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL" mock_managed_identity.return_value = MOCK_MANAGED_IDENTITY_CLIENT_ID_CREDENTIAL mock_managed_identity.side_effect = ValueError("TEST ERROR") - result = _get_authentication_credential( - foo="bar" - ) + result = _get_authentication_credential(foo="bar") self.assertIsNone(result) mock_managed_identity.assert_called_once_with(client_id="TEST_CLIENT_ID") @@ -1320,9 +1282,21 @@ def __init__(self, status_code, text, headers={}, reason="test", content="{}"): self.reason = reason self.content = content self.raw = MockRaw() + self._content_consumed = False + + def iter_content(self, chunk_size=1): + content_bytes = self.content.encode() if isinstance(self.content, str) else self.content + for i in range(0, len(content_bytes), chunk_size): + yield content_bytes[i : i + chunk_size] + self._content_consumed = True + + def iter_bytes(self, chunk_size=None): + return self.iter_content(chunk_size or 1) + + def close(self): + pass class MockRaw: def __init__(self): self.enforce_content_length = False - diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_connection_string_parser.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_connection_string_parser.py index f25081b5e658..d235c5dde117 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_connection_string_parser.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_connection_string_parser.py @@ -216,32 +216,36 @@ def test_parse_connection_string_suffix_no_location(self): def test_region_extraction_from_endpoint_with_number(self): 
"""Test region extraction from endpoint URL with number suffix.""" parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" ) self.assertEqual(parser.region, "westeurope") def test_region_extraction_from_endpoint_without_number(self): """Test region extraction from endpoint URL without number suffix.""" parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://westeurope.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://westeurope.in.applicationinsights.azure.com/" ) self.assertEqual(parser.region, "westeurope") def test_region_extraction_from_endpoint_two_digit_number(self): """Test region extraction from endpoint URL with two-digit number.""" parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://eastus-12.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://eastus-12.in.applicationinsights.azure.com/" ) self.assertEqual(parser.region, "eastus") def test_region_extraction_from_endpoint_three_digit_number(self): """Test region extraction from endpoint URL with three-digit number.""" parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://northeurope-999.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://northeurope-999.in.applicationinsights.azure.com/" ) 
self.assertEqual(parser.region, "northeurope") @@ -255,28 +259,28 @@ def test_region_extraction_various_regions(self): ("westus2-7.in.applicationinsights.azure.com", "westus2"), ("francecentral.in.applicationinsights.azure.com", "francecentral"), ] - + for endpoint_suffix, expected_region in test_cases: with self.subTest(endpoint=endpoint_suffix): parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - f";IngestionEndpoint=https://{endpoint_suffix}/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + f";IngestionEndpoint=https://{endpoint_suffix}/" ) self.assertEqual(parser.region, expected_region) def test_region_extraction_no_region_global_endpoint(self): """Test that no region is extracted from global endpoints.""" parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://dc.services.visualstudio.com" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://dc.services.visualstudio.com" ) self.assertIsNone(parser.region) def test_region_extraction_no_region_default_endpoint(self): """Test that no region is extracted when using default endpoint.""" - parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key - ) + parser = ConnectionStringParser(connection_string="InstrumentationKey=" + self._valid_instrumentation_key) self.assertIsNone(parser.region) def test_region_extraction_invalid_endpoint_format(self): @@ -287,20 +291,22 @@ def test_region_extraction_invalid_endpoint_format(self): "https://not-a-region.in.applicationinsights.azure.com", "ftp://westeurope-5.in.applicationinsights.azure.com", ] - + for endpoint in invalid_endpoints: with self.subTest(endpoint=endpoint): parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - 
f";IngestionEndpoint={endpoint}" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + f";IngestionEndpoint={endpoint}" ) self.assertIsNone(parser.region) def test_region_extraction_from_environment_endpoint(self): """Test region extraction from endpoint set via environment variable.""" os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] = ( - "InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" + "InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" ) parser = ConnectionStringParser(connection_string=None) self.assertEqual(parser.region, "westeurope") @@ -308,12 +314,14 @@ def test_region_extraction_from_environment_endpoint(self): def test_region_extraction_code_endpoint_takes_priority(self): """Test that endpoint from code connection string takes priority over environment.""" os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] = ( - "InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://eastus-1.in.applicationinsights.azure.com/" + "InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://eastus-1.in.applicationinsights.azure.com/" ) parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + ";IngestionEndpoint=https://westeurope-5.in.applicationinsights.azure.com/" ) self.assertEqual(parser.region, "westeurope") @@ -323,12 +331,13 @@ def test_region_extraction_with_trailing_slash(self): "https://westeurope-5.in.applicationinsights.azure.com/", "https://westeurope-5.in.applicationinsights.azure.com", ] - + for endpoint in test_cases: with self.subTest(endpoint=endpoint): parser = 
ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - f";IngestionEndpoint={endpoint}" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + f";IngestionEndpoint={endpoint}" ) self.assertEqual(parser.region, "westeurope") @@ -339,11 +348,12 @@ def test_region_extraction_alphanumeric_regions(self): ("eastus2-5.in.applicationinsights.azure.com", "eastus2"), ("southcentralus-3.in.applicationinsights.azure.com", "southcentralus"), ] - + for endpoint_suffix, expected_region in test_cases: with self.subTest(endpoint=endpoint_suffix): parser = ConnectionStringParser( - connection_string="InstrumentationKey=" + self._valid_instrumentation_key + - f";IngestionEndpoint=https://{endpoint_suffix}" + connection_string="InstrumentationKey=" + + self._valid_instrumentation_key + + f";IngestionEndpoint=https://{endpoint_suffix}" ) self.assertEqual(parser.region, expected_region) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_storage.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_storage.py index 86cb1fdf3fde..4f13e493bb3a 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_storage.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_storage.py @@ -69,7 +69,7 @@ def test_put_error(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "foobar")) with mock.patch("os.rename", side_effect=throw(Exception)): result = blob.put([1, 2, 3]) - #self.assertIsInstance(result, str) + # self.assertIsInstance(result, str) @unittest.skip("transient storage") def test_put_success_returns_self(self): @@ -83,7 +83,7 @@ def test_put_success_returns_self(self): def test_put_file_write_error_returns_string(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "write_error_blob")) test_input = [1, 2, 3] - + with mock.patch("builtins.open", side_effect=PermissionError("Cannot write to file")): result = blob.put(test_input) 
self.assertIsInstance(result, str) @@ -93,7 +93,7 @@ def test_put_file_write_error_returns_string(self): def test_put_rename_error_returns_string(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "rename_error_blob")) test_input = [1, 2, 3] - + # Mock os.rename to raise an exception with mock.patch("os.rename", side_effect=OSError("File already exists")): result = blob.put(test_input) @@ -102,19 +102,20 @@ def test_put_rename_error_returns_string(self): def test_put_json_serialization_error_returns_string(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "json_error_blob")) - + import datetime + non_serializable_data = [datetime.datetime.now()] - + result = blob.put(non_serializable_data) self.assertIsInstance(result, str) # Should contain JSON serialization error self.assertTrue("not JSON serializable" in result or "Object of type" in result) - + def test_put_various_exceptions_return_strings(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "various_errors_blob")) test_input = [1, 2, 3] - + exception_scenarios = [ ("FileNotFoundError", FileNotFoundError("Directory not found")), ("PermissionError", PermissionError("Permission denied")), @@ -123,7 +124,7 @@ def test_put_various_exceptions_return_strings(self): ("ValueError", ValueError("Invalid value")), ("RuntimeError", RuntimeError("Runtime error occurred")), ] - + for error_name, exception in exception_scenarios: with self.subTest(exception=error_name): with mock.patch("os.rename", side_effect=exception): @@ -136,19 +137,19 @@ def test_put_with_lease_period_success(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "lease_success_blob")) test_input = [1, 2, 3] lease_period = 60 - + result = blob.put(test_input, lease_period=lease_period) self.assertIsInstance(result, StorageExportResult) self.assertEqual(result, StorageExportResult.LOCAL_FILE_BLOB_SUCCESS) # File should have .lock extension due to lease period self.assertTrue(blob.fullpath.endswith(".lock")) - + @unittest.skip("transient storage") 
def test_put_with_lease_period_error_returns_string(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "lease_error_blob")) test_input = [1, 2, 3] lease_period = 60 - + # Mock os.rename to fail with mock.patch("os.rename", side_effect=OSError("Cannot rename file")): result = blob.put(test_input, lease_period=lease_period) @@ -158,7 +159,7 @@ def test_put_with_lease_period_error_returns_string(self): def test_put_empty_data_success(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "empty_data_blob")) empty_data = [] - + result = blob.put(empty_data) self.assertIsInstance(result, StorageExportResult) self.assertEqual(result, StorageExportResult.LOCAL_FILE_BLOB_SUCCESS) @@ -168,11 +169,11 @@ def test_put_large_data_success(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "large_data_blob")) # Create a large list of data large_data = [{"id": i, "value": f"data_{i}"} for i in range(1000)] - + result = blob.put(large_data) self.assertIsInstance(result, StorageExportResult) self.assertEqual(result, StorageExportResult.LOCAL_FILE_BLOB_SUCCESS) - + # Verify data can be retrieved retrieved_data = blob.get() self.assertEqual(len(retrieved_data), 1000) @@ -182,28 +183,30 @@ def test_put_large_data_success(self): def test_put_return_type_consistency(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "consistency_blob")) test_input = [1, 2, 3] - + # Test successful case result_success = blob.put(test_input) self.assertTrue(isinstance(result_success, StorageExportResult) or isinstance(result_success, str)) - + # Test error case blob2 = LocalFileBlob(os.path.join(TEST_FOLDER, "consistency_blob2")) with mock.patch("os.rename", side_effect=Exception("Test error")): result_error = blob2.put(test_input) self.assertIsInstance(result_error, str) - + def test_put_invalid_return_type(self): blob = LocalFileBlob(os.path.join(TEST_FOLDER, "invalid_return_blob")) test_input = [1, 2, 3] - + # This tests that even if os.rename somehow returns something unexpected, # the put 
method still maintains its type contract with mock.patch("os.rename", return_value=42): result = blob.put(test_input) # Should either convert to string or return StorageExportResult - self.assertTrue(isinstance(result, (StorageExportResult, str)), - f"Expected StorageExportResult or str, got {type(result)}") + self.assertTrue( + isinstance(result, (StorageExportResult, str)), + f"Expected StorageExportResult or str, got {type(result)}", + ) @unittest.skip("transient storage") def test_put(self): @@ -259,7 +262,7 @@ def test_put(self): with mock.patch("os.rename", side_effect=throw(Exception)): result = stor.put(test_input) # Should return an error string when os.rename fails - #self.assertIsInstance(result, None) + # self.assertIsInstance(result, None) def test_put_max_size(self): test_input = (1, 2, 3) @@ -321,7 +324,9 @@ def test_maintenance_routine(self): def test_put_storage_disabled_readonly(self): test_input = (1, 2, 3) - with mock.patch("azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=True): + with mock.patch( + "azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=True + ): with LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_test")) as stor: stor._enabled = False result = stor.put(test_input) @@ -330,8 +335,13 @@ def test_put_storage_disabled_readonly(self): def test_put_storage_disabled_with_exception_state(self): test_input = (1, 2, 3) exception_message = "Previous storage error occurred" - with mock.patch("azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=False): - with mock.patch("azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_exception", return_value=exception_message): + with mock.patch( + "azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=False + ): + with mock.patch( + 
"azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_exception", + return_value=exception_message, + ): with LocalFileStorage(os.path.join(TEST_FOLDER, "exception_test")) as stor: stor._enabled = False result = stor.put(test_input) @@ -339,8 +349,12 @@ def test_put_storage_disabled_with_exception_state(self): def test_put_storage_disabled_no_exception(self): test_input = (1, 2, 3) - with mock.patch("azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=False): - with mock.patch("azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_exception", return_value=""): + with mock.patch( + "azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_readonly", return_value=False + ): + with mock.patch( + "azure.monitor.opentelemetry.exporter._storage.get_local_storage_setup_state_exception", return_value="" + ): with LocalFileStorage(os.path.join(TEST_FOLDER, "disabled_test")) as stor: stor._enabled = False result = stor.put(test_input) @@ -349,7 +363,7 @@ def test_put_storage_disabled_no_exception(self): def test_put_persistence_capacity_reached(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "capacity_test")) as stor: - with mock.patch.object(stor, '_check_storage_size', return_value=False): + with mock.patch.object(stor, "_check_storage_size", return_value=False): result = stor.put(test_input) self.assertEqual(result, StorageExportResult.CLIENT_PERSISTENCE_CAPACITY_REACHED) @@ -359,7 +373,7 @@ def test_put_success_returns_localfileblob(self): result = stor.put(test_input, lease_period=0) # No lease period so file is immediately available self.assertIsInstance(result, StorageExportResult) self.assertEqual(stor.get().get(), test_input) - + def test_put_blob_put_failure_returns_string(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "blob_failure_test")) as stor: @@ -368,12 +382,13 @@ def 
test_put_blob_put_failure_returns_string(self): result = stor.put(test_input) self.assertIsInstance(result, str) self.assertIn("Permission denied", result) - def test_put_exception_in_method_returns_string(self): test_input = (1, 2, 3) with LocalFileStorage(os.path.join(TEST_FOLDER, "method_exception_test")) as stor: - with mock.patch("azure.monitor.opentelemetry.exporter._storage._now", side_effect=RuntimeError("Time error")): + with mock.patch( + "azure.monitor.opentelemetry.exporter._storage._now", side_effect=RuntimeError("Time error") + ): result = stor.put(test_input) self.assertIsInstance(result, str) self.assertIn("Time error", result) @@ -386,7 +401,7 @@ def test_put_various_blob_errors(self): ("OSError", OSError("Disk full")), ("IOError", IOError("I/O error")), ] - + for error_name, error_exception in error_scenarios: with self.subTest(error=error_name): with LocalFileStorage(os.path.join(TEST_FOLDER, f"error_test_{error_name}")) as stor: @@ -399,7 +414,7 @@ def test_put_various_blob_errors(self): def test_put_with_lease_period(self): test_input = (1, 2, 3) custom_lease_period = 120 # 2 minutes - + with LocalFileStorage(os.path.join(TEST_FOLDER, "lease_test")) as stor: result = stor.put(test_input, lease_period=custom_lease_period) self.assertIsInstance(result, StorageExportResult) @@ -408,7 +423,7 @@ def test_put_with_lease_period(self): def test_put_default_lease_period(self): test_input = (1, 2, 3) - + with LocalFileStorage(os.path.join(TEST_FOLDER, "default_lease_test"), lease_period=90) as stor: result = stor.put(test_input) self.assertIsInstance(result, StorageExportResult) @@ -423,30 +438,30 @@ def test_check_and_set_folder_permissions_oserror_sets_exception_state(self): get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Mock os.makedirs to raise OSError during folder permissions check with 
mock.patch("os.makedirs", side_effect=OSError(test_error_message)): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "permission_error_test")) - + # Storage should be disabled due to permission error self.assertFalse(stor._enabled) - + # Exception state should be set with the error message exception_state = get_local_storage_setup_state_exception() self.assertEqual(exception_state, test_error_message) - + # When storage is disabled with exception state, put() should return the exception message result = stor.put(test_input) self.assertEqual(result, test_error_message) - + stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_check_and_set_folder_permissions_generic_exception_sets_exception_state(self): test_input = (1, 2, 3) test_error_message = "RuntimeError: Unexpected error during setup" @@ -455,30 +470,30 @@ def test_check_and_set_folder_permissions_generic_exception_sets_exception_state get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Mock os.makedirs to raise a generic exception with mock.patch("os.makedirs", side_effect=RuntimeError(test_error_message)): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "generic_error_test")) - + # Storage should be disabled due to exception self.assertFalse(stor._enabled) - + # Exception state should be set with the error message exception_state = get_local_storage_setup_state_exception() self.assertEqual(exception_state, test_error_message) - + # When storage is disabled with exception state, put() should return the exception message result = stor.put(test_input) self.assertEqual(result, test_error_message) - + stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_check_and_set_folder_permissions_readonly_filesystem_sets_readonly_state(self): test_input = (1, 2, 3) @@ -486,31 +501,32 @@ def 
test_check_and_set_folder_permissions_readonly_filesystem_sets_readonly_stat get_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + # Clear any existing states (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Create an OSError with Read-only file system import errno + readonly_error = OSError("Read-only file system") - readonly_error.errno = errno.EROFS # cspell:disable-line - + readonly_error.errno = errno.EROFS # cspell:disable-line + # Mock os.makedirs to raise READONLY error with mock.patch("os.makedirs", side_effect=readonly_error): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_fs_test")) - + # Storage should be disabled due to readonly filesystem self.assertFalse(stor._enabled) - + # Readonly state should be set self.assertTrue(get_local_storage_setup_state_readonly()) - + # When storage is disabled and readonly, put() should return CLIENT_READONLY result = stor.put(test_input) self.assertEqual(result, StorageExportResult.CLIENT_READONLY) - + stor.close() - + # Clean up - note: cannot easily reset readonly state, but test isolation should handle this set_local_storage_setup_state_exception("") @@ -522,10 +538,10 @@ def test_check_and_set_folder_permissions_windows_icacls_failure_sets_exception_ get_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Mock Windows environment and icacls failure with mock.patch("os.name", "nt"): # Windows with mock.patch("os.makedirs"): # Allow directory creation @@ -533,17 +549,17 @@ def test_check_and_set_folder_permissions_windows_icacls_failure_sets_exception_ # Mock subprocess.run to return failure (non-zero return code) mock_result = mock.MagicMock() mock_result.returncode = 1 # Failure - + with mock.patch("subprocess.run", return_value=mock_result): stor = 
LocalFileStorage(os.path.join(TEST_FOLDER, "icacls_failure_test")) - + # Storage should be disabled due to icacls failure self.assertFalse(stor._enabled) - + # Exception state should still be empty string since icacls failure doesn't set exception exception_state = get_local_storage_setup_state_exception() self.assertEqual(exception_state, "") - + # When storage is disabled, put() behavior depends on readonly state result = stor.put(test_input) if get_local_storage_setup_state_readonly(): @@ -552,12 +568,12 @@ def test_check_and_set_folder_permissions_windows_icacls_failure_sets_exception_ else: # If readonly not set, should return CLIENT_STORAGE_DISABLED self.assertEqual(result, StorageExportResult.CLIENT_STORAGE_DISABLED) - + stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_check_and_set_folder_permissions_windows_user_retrieval_failure(self): test_input = (1, 2, 3) @@ -566,19 +582,19 @@ def test_check_and_set_folder_permissions_windows_user_retrieval_failure(self): get_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Mock Windows environment and user retrieval failure with mock.patch("os.name", "nt"): # Windows with mock.patch("os.makedirs"): # Allow directory creation with mock.patch.object(LocalFileStorage, "_get_current_user", return_value=None): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "user_failure_test")) - + # Storage should be disabled due to user retrieval failure self.assertFalse(stor._enabled) - + # When storage is disabled, put() behavior depends on readonly state result = stor.put(test_input) if get_local_storage_setup_state_readonly(): @@ -587,12 +603,12 @@ def test_check_and_set_folder_permissions_windows_user_retrieval_failure(self): else: # If readonly not set, should return CLIENT_STORAGE_DISABLED self.assertEqual(result, 
StorageExportResult.CLIENT_STORAGE_DISABLED) - + stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_check_and_set_folder_permissions_unix_chmod_exception_sets_exception_state(self): test_input = (1, 2, 3) test_error_message = "OSError: Operation not permitted" @@ -602,23 +618,23 @@ def test_check_and_set_folder_permissions_unix_chmod_exception_sets_exception_st get_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) set_local_storage_setup_state_exception("") - + # Mock Unix environment and chmod failure with mock.patch("os.name", "posix"): # Unix with mock.patch("os.makedirs"): # Allow directory creation with mock.patch("os.chmod", side_effect=OSError(test_error_message)): stor = LocalFileStorage(os.path.join(TEST_FOLDER, "chmod_failure_test")) - + # Storage should be disabled due to chmod failure self.assertFalse(stor._enabled) - + # Exception state should be set with the error message exception_state = get_local_storage_setup_state_exception() self.assertEqual(exception_state, test_error_message) - + # When storage is disabled, put() behavior depends on readonly state result = stor.put(test_input) if get_local_storage_setup_state_readonly(): @@ -627,12 +643,12 @@ def test_check_and_set_folder_permissions_unix_chmod_exception_sets_exception_st else: # If readonly not set, should return the exception message self.assertEqual(result, test_error_message) - + stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_exception_state_persistence_across_storage_instances(self): test_input = (1, 2, 3) test_error_message = "Persistent storage setup error" @@ -641,17 +657,17 @@ def test_exception_state_persistence_across_storage_instances(self): get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state (set to empty string, not None) 
set_local_storage_setup_state_exception("") - + # First storage instance that sets exception state with mock.patch("os.makedirs", side_effect=OSError(test_error_message)): stor1 = LocalFileStorage(os.path.join(TEST_FOLDER, "persistent_error_test1")) self.assertFalse(stor1._enabled) self.assertEqual(get_local_storage_setup_state_exception(), test_error_message) stor1.close() - + # Second storage instance should also be affected by the exception state # (assuming the exception state persists between instances in the same process) stor2 = LocalFileStorage(os.path.join(TEST_FOLDER, "persistent_error_test2")) @@ -661,34 +677,34 @@ def test_exception_state_persistence_across_storage_instances(self): if isinstance(result, str) and test_error_message in result: self.assertIn(test_error_message, result) stor2.close() - + # Clean up set_local_storage_setup_state_exception("") - + def test_exception_state_cleared_and_storage_recovery(self): test_input = (1, 2, 3) test_error_message = "Temporary storage setup error" - + from azure.monitor.opentelemetry.exporter.statsbeat.customer._state import ( get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Set exception state manually set_local_storage_setup_state_exception(test_error_message) self.assertEqual(get_local_storage_setup_state_exception(), test_error_message) - + # Create storage with exception state set - should be disabled stor1 = LocalFileStorage(os.path.join(TEST_FOLDER, "recovery_test1")) if not stor1._enabled: result = stor1.put(test_input) self.assertEqual(result, test_error_message) stor1.close() - + # Clear exception state (set to empty string, not None) set_local_storage_setup_state_exception("") self.assertEqual(get_local_storage_setup_state_exception(), "") - + # Create new storage instance - should work normally now with LocalFileStorage(os.path.join(TEST_FOLDER, "recovery_test2")) as stor2: if stor2._enabled: # Storage should be enabled now @@ -702,37 +718,37 @@ def 
test_local_storage_state_readonly_get_set_operations(self): get_local_storage_setup_state_readonly, set_local_storage_setup_state_readonly, _LOCAL_STORAGE_SETUP_STATE, - _LOCAL_STORAGE_SETUP_STATE_LOCK + _LOCAL_STORAGE_SETUP_STATE_LOCK, ) - + # Save original state original_readonly_state = _LOCAL_STORAGE_SETUP_STATE["READONLY"] - + try: # Test 1: Initial state should be False # Note: Cannot easily reset readonly state, so we'll work with current state initial_state = get_local_storage_setup_state_readonly() self.assertIsInstance(initial_state, bool) - + # Test 2: Set readonly state and verify get operation set_local_storage_setup_state_readonly() self.assertTrue(get_local_storage_setup_state_readonly()) - + # Test 3: Verify thread safety by directly accessing state with _LOCAL_STORAGE_SETUP_STATE_LOCK: direct_value = _LOCAL_STORAGE_SETUP_STATE["READONLY"] self.assertTrue(direct_value) self.assertEqual(get_local_storage_setup_state_readonly(), direct_value) - + # Test 4: Multiple calls to set should maintain True state set_local_storage_setup_state_readonly() set_local_storage_setup_state_readonly() self.assertTrue(get_local_storage_setup_state_readonly()) - + # Test 5: Verify state persists across multiple get calls for _ in range(5): self.assertTrue(get_local_storage_setup_state_readonly()) - + finally: # Note: We cannot reset readonly state to False as there's no reset function # This is by design - once readonly is set, it stays set for the process @@ -746,58 +762,60 @@ def test_readonly_state_interaction_with_storage_put_method(self): set_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + # Clear exception state set_local_storage_setup_state_exception("") - + # Set readonly state set_local_storage_setup_state_readonly() self.assertTrue(get_local_storage_setup_state_readonly()) - + # Create storage instance with disabled state to test readonly behavior with LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_interaction_test")) 
as stor: # Manually disable storage to simulate permission failure scenario stor._enabled = False - + # When storage is disabled and readonly state is set, put() should return CLIENT_READONLY result = stor.put(test_input) self.assertEqual(result, StorageExportResult.CLIENT_READONLY) - + def test_storage_put_invalid_return_type(self): test_input = (1, 2, 3) - + with LocalFileStorage(os.path.join(TEST_FOLDER, "invalid_return_test")) as stor: # Mock _check_storage_size to return a non-boolean value - with mock.patch.object(stor, '_check_storage_size', return_value=42): + with mock.patch.object(stor, "_check_storage_size", return_value=42): result = stor.put(test_input) # Should maintain return type contract despite invalid internal return - self.assertTrue(isinstance(result, (StorageExportResult, str)), - f"Expected StorageExportResult or str, got {type(result)}") + self.assertTrue( + isinstance(result, (StorageExportResult, str)), + f"Expected StorageExportResult or str, got {type(result)}", + ) def test_readonly_state_priority_over_exception_state(self): test_input = (1, 2, 3) test_exception_message = "Some storage exception" - + from azure.monitor.opentelemetry.exporter.statsbeat.customer._state import ( get_local_storage_setup_state_readonly, set_local_storage_setup_state_readonly, get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Set both readonly and exception states set_local_storage_setup_state_readonly() set_local_storage_setup_state_exception(test_exception_message) - + # Verify both states are set self.assertTrue(get_local_storage_setup_state_readonly()) self.assertEqual(get_local_storage_setup_state_exception(), test_exception_message) - + # Create storage instance with disabled state with LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_priority_test")) as stor: # Manually disable storage stor._enabled = False - + # Readonly state should take priority over exception state # Based on the put() method logic: 
readonly is checked first result = stor.put(test_input) @@ -811,11 +829,11 @@ def test_readonly_state_thread_safety(self): get_local_storage_setup_state_readonly, set_local_storage_setup_state_readonly, ) - + # Track results from multiple threads results = [] errors = [] - + def readonly_operations(): try: # Each thread sets readonly and gets the value @@ -825,24 +843,24 @@ def readonly_operations(): results.append(value) except Exception as e: errors.append(str(e)) - + # Create multiple threads threads = [] for _ in range(10): thread = threading.Thread(target=readonly_operations) threads.append(thread) - + # Start all threads for thread in threads: thread.start() - + # Wait for all threads to complete for thread in threads: thread.join() - + # Verify no errors occurred self.assertEqual(len(errors), 0, f"Thread safety errors: {errors}") - + # Verify all operations succeeded and returned True self.assertEqual(len(results), 10) for result in results: @@ -856,24 +874,24 @@ def test_readonly_state_persistence_across_storage_instances(self): set_local_storage_setup_state_readonly, set_local_storage_setup_state_exception, ) - + set_local_storage_setup_state_exception("") set_local_storage_setup_state_readonly() - + # First storage instance stor1 = LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_persist_test1")) stor1._enabled = False # Simulate disabled state result1 = stor1.put(test_input) self.assertEqual(result1, StorageExportResult.CLIENT_READONLY) stor1.close() - + # Second storage instance should also see readonly state stor2 = LocalFileStorage(os.path.join(TEST_FOLDER, "readonly_persist_test2")) stor2._enabled = False # Simulate disabled state result2 = stor2.put(test_input) self.assertEqual(result2, StorageExportResult.CLIENT_READONLY) stor2.close() - + # Verify readonly state is still set self.assertTrue(get_local_storage_setup_state_readonly()) @@ -882,17 +900,17 @@ def test_readonly_state_direct_access_vs_function_access(self): 
get_local_storage_setup_state_readonly, set_local_storage_setup_state_readonly, _LOCAL_STORAGE_SETUP_STATE, - _LOCAL_STORAGE_SETUP_STATE_LOCK + _LOCAL_STORAGE_SETUP_STATE_LOCK, ) - + set_local_storage_setup_state_readonly() - + # Compare function access with direct access function_value = get_local_storage_setup_state_readonly() - + with _LOCAL_STORAGE_SETUP_STATE_LOCK: direct_value = _LOCAL_STORAGE_SETUP_STATE["READONLY"] - + self.assertEqual(function_value, direct_value) self.assertTrue(function_value) self.assertTrue(direct_value) @@ -902,14 +920,14 @@ def test_readonly_state_idempotent_set_operations(self): get_local_storage_setup_state_readonly, set_local_storage_setup_state_readonly, ) - + # Multiple set operations should be idempotent initial_state = get_local_storage_setup_state_readonly() - + for _ in range(5): set_local_storage_setup_state_readonly() self.assertTrue(get_local_storage_setup_state_readonly()) - + # State should remain True after multiple sets final_state = get_local_storage_setup_state_readonly() self.assertTrue(final_state) @@ -920,10 +938,10 @@ def test_check_and_set_folder_permissions_unix_multiuser_scenario(self): get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state set_local_storage_setup_state_exception("") - + storage_abs_path = _get_storage_directory(DUMMY_INSTRUMENTATION_KEY) with mock.patch(f"{STORAGE_MODULE}.os.name", "posix"): @@ -945,21 +963,21 @@ def mock_makedirs(path, mode=0o777, exist_ok=False): self.assertEqual( makedirs_calls, - [(storage_abs_path, '0o777', True)], + [(storage_abs_path, "0o777", True)], f"Unexpected makedirs calls: {makedirs_calls}", ) self.assertEqual( - {(storage_abs_path, '0o700')}, + {(storage_abs_path, "0o700")}, {(call_path, mode) for call_path, mode in chmod_calls}, f"Unexpected chmod calls: {chmod_calls}", ) stor.close() - + # Clean up set_local_storage_setup_state_exception("") - + def 
test_check_and_set_folder_permissions_unix_multiuser_parent_permission_failure(self): from azure.monitor.opentelemetry.exporter.statsbeat.customer._state import ( get_local_storage_setup_state_exception, @@ -972,6 +990,7 @@ def test_check_and_set_folder_permissions_unix_multiuser_parent_permission_failu storage_abs_path = _get_storage_directory(DUMMY_INSTRUMENTATION_KEY) with mock.patch(f"{STORAGE_MODULE}.os.name", "posix"): + def mock_makedirs(path, mode=0o777, exist_ok=False): raise PermissionError("Operation not permitted on parent directory") @@ -986,7 +1005,7 @@ def mock_makedirs(path, mode=0o777, exist_ok=False): self.assertEqual(exception_state, "Operation not permitted on parent directory") stor.close() - + # Clean up set_local_storage_setup_state_exception("") @@ -997,13 +1016,14 @@ def test_check_and_set_folder_permissions_unix_multiuser_storage_permission_fail get_local_storage_setup_state_exception, set_local_storage_setup_state_exception, ) - + # Clear any existing exception state set_local_storage_setup_state_exception("") storage_abs_path = _get_storage_directory(DUMMY_INSTRUMENTATION_KEY) with mock.patch(f"{STORAGE_MODULE}.os.name", "posix"): + def mock_chmod(path, mode): if mode == 0o700: raise PermissionError(test_error_message) @@ -1020,6 +1040,6 @@ def mock_chmod(path, mode): self.assertEqual(exception_state, test_error_message) stor.close() - + # Clean up set_local_storage_setup_state_exception("") diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_utils.py index 882f2a81a0a1..d10d82127a61 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_utils.py @@ -8,7 +8,7 @@ import unittest from azure.monitor.opentelemetry.exporter import _utils -from azure.monitor.opentelemetry.exporter._generated.models import TelemetryItem +from 
azure.monitor.opentelemetry.exporter._generated.exporter.models import TelemetryItem from opentelemetry.sdk.resources import Resource from unittest.mock import patch @@ -24,8 +24,6 @@ TEST_SDK_VERSION_PREFIX, ), } -TEST_TIMESTAMP = "TEST_TIMESTAMP" -TEST_TIME = "TEST_TIME" TEST_WEBSITE_SITE_NAME = "TEST_WEBSITE_SITE_NAME" TEST_KUBERNETES_SERVICE_HOST = "TEST_KUBERNETES_SERVICE_HOST" TEST_AKS_ARM_NAMESPACE_ID = "TEST_AKS_ARM_NAMESPACE_ID" @@ -222,7 +220,6 @@ def test_populate_part_a_fields_aks_undefined(self): self.assertEqual(tags.get("ai.cloud.roleInstance"), "testPodName") self.assertEqual(tags.get("ai.internal.nodeName"), tags.get("ai.cloud.roleInstance")) - def test_populate_part_a_fields_aks_with_service(self): resource = Resource( { @@ -263,16 +260,17 @@ def test_populate_part_a_fields_aks_with_unknown_service(self): self.assertEqual(tags.get("ai.cloud.roleInstance"), "testPodName") self.assertEqual(tags.get("ai.internal.nodeName"), tags.get("ai.cloud.roleInstance")) - @patch("azure.monitor.opentelemetry.exporter._utils.ns_to_iso_str", return_value=TEST_TIME) @patch("azure.monitor.opentelemetry.exporter._utils.azure_monitor_context", TEST_AZURE_MONITOR_CONTEXT) - def test_create_telemetry_item(self, mock_ns_to_iso_str): - result = _utils._create_telemetry_item(TEST_TIMESTAMP) + def test_create_telemetry_item(self): + time_ns = time.time_ns() + expected_datetime = datetime.datetime.fromtimestamp(time_ns / 1e9, tz=datetime.timezone.utc) + result = _utils._create_telemetry_item(time_ns) expected_tags = dict(TEST_AZURE_MONITOR_CONTEXT) expected = TelemetryItem( name="", instrumentation_key="", tags=expected_tags, - time=TEST_TIME, + time=expected_datetime, ) self.assertEqual(result, expected) @@ -554,17 +552,25 @@ def test_attach_off_app_service_with_agent(self, mock_isdir): # This is not an expected scenario and just tests the default self.assertFalse(_utils._is_attach_enabled()) - @patch.dict("azure.monitor.opentelemetry.exporter._utils.environ", { - 
"KUBERNETES_SERVICE_HOST": TEST_KUBERNETES_SERVICE_HOST, - "AKS_ARM_NAMESPACE_ID": TEST_AKS_ARM_NAMESPACE_ID, - }, clear=True) + @patch.dict( + "azure.monitor.opentelemetry.exporter._utils.environ", + { + "KUBERNETES_SERVICE_HOST": TEST_KUBERNETES_SERVICE_HOST, + "AKS_ARM_NAMESPACE_ID": TEST_AKS_ARM_NAMESPACE_ID, + }, + clear=True, + ) def test_attach_aks(self): # This is not an expected scenario and just tests the default self.assertTrue(_utils._is_attach_enabled()) - @patch.dict("azure.monitor.opentelemetry.exporter._utils.environ", { - "KUBERNETES_SERVICE_HOST": TEST_KUBERNETES_SERVICE_HOST, - }, clear=True) + @patch.dict( + "azure.monitor.opentelemetry.exporter._utils.environ", + { + "KUBERNETES_SERVICE_HOST": TEST_KUBERNETES_SERVICE_HOST, + }, + clear=True, + ) def test_aks_no_attach(self): # This is not an expected scenario and just tests the default self.assertFalse(_utils._is_attach_enabled()) @@ -616,10 +622,7 @@ def test_is_any_synthetic_source_always_on(self): self.assertTrue(_utils._is_any_synthetic_source(properties)) def test_is_any_synthetic_source_both(self): - properties = { - "user_agent.synthetic.type": "bot", - "http.user_agent": "Azure-Load-Testing/1.0 AlwaysOn" - } + properties = {"user_agent.synthetic.type": "bot", "http.user_agent": "Azure-Load-Testing/1.0 AlwaysOn"} self.assertTrue(_utils._is_any_synthetic_source(properties)) def test_is_any_synthetic_source_none(self): diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_rate_limited_sampling.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_rate_limited_sampling.py index 6834a8d9c424..1a67879e0f8a 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_rate_limited_sampling.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_rate_limited_sampling.py @@ -25,44 +25,44 @@ def create_parent_span(sampled: bool, sample_rate: Optional[float] = None, is_remote: bool = False): trace_flags = 
TraceFlags(0x01) if sampled else TraceFlags(0x00) - + span_context = SpanContext( - trace_id=0x1234567890abcdef1234567890abcdef, - span_id=0x1234567890abcdef, + trace_id=0x1234567890ABCDEF1234567890ABCDEF, + span_id=0x1234567890ABCDEF, is_remote=is_remote, trace_flags=trace_flags, trace_state=None, ) - + mock_span = Mock() mock_span.get_span_context.return_value = span_context mock_span.is_recording.return_value = sampled - + attributes = {} if sample_rate is not None: attributes[_SAMPLE_RATE_KEY] = sample_rate mock_span.attributes = attributes - + return mock_span class TestRateLimitedSampler(unittest.TestCase): - + def setUp(self): # Use a mock for time.time_ns() instead of nano_time_supplier injection self.current_time = 1_000_000_000_000 # 1 second in nanoseconds - self.time_patcher = patch('time.time_ns', side_effect=lambda: self.current_time) + self.time_patcher = patch("time.time_ns", side_effect=lambda: self.current_time) self.mock_time = self.time_patcher.start() - + def tearDown(self): self.time_patcher.stop() - + def advance_time(self, nanoseconds_increment: int): self.current_time += nanoseconds_increment - + def get_current_time_nanoseconds(self) -> int: return self.current_time - + # Test sampling behavior with a high target rate and moderate span frequency def test_constant_rate_sampling(self): target_rate = 1000.0 @@ -70,6 +70,7 @@ def test_constant_rate_sampling(self): # Reset initial state to use our controlled time from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) sampler._sampling_percentage_generator._round_to_nearest = False @@ -81,15 +82,11 @@ def test_constant_rate_sampling(self): for i in range(num_spans): self.advance_time(nanoseconds_between_spans) - result = sampler.should_sample( - parent_context=None, - trace_id=i, - name=f"test-span-{i}" - ) + result = 
sampler.should_sample(parent_context=None, trace_id=i, name=f"test-span-{i}") self.assertIsInstance(result, SamplingResult) self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) - + # Check if _SAMPLE_RATE_KEY is present only when sampling percentage is not 100% sampling_percentage = sampler._sampling_percentage_generator.get() if sampling_percentage != 100.0: @@ -107,6 +104,7 @@ def test_high_volume_sampling(self): # Reset initial state to use our controlled time from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) @@ -115,17 +113,14 @@ def test_high_volume_sampling(self): sampled_count = 0 import random + random.seed(42) trace_ids = [random.getrandbits(128) for _ in range(num_spans)] for i in range(num_spans): self.advance_time(nanoseconds_between_spans) - result = sampler.should_sample( - parent_context=None, - trace_id=trace_ids[i], - name=f"high-volume-span-{i}" - ) + result = sampler.should_sample(parent_context=None, trace_id=trace_ids[i], name=f"high-volume-span-{i}") if result.decision == Decision.RECORD_AND_SAMPLE: sampled_count += 1 @@ -137,9 +132,10 @@ def test_high_volume_sampling(self): def test_rate_adaptation_increasing_load(self): target_rate = 20.0 sampler = RateLimitedSampler(target_rate) - + # Reset initial state to use our controlled time from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) @@ -153,6 +149,7 @@ def test_rate_adaptation_increasing_load(self): sampled_phase2 = 0 import random + random.seed(123) trace_ids_phase1 = [random.getrandbits(128) for _ in range(phase1_spans)] trace_ids_phase2 = [random.getrandbits(128) for _ in range(phase2_spans)] @@ -175,71 +172,56 @@ def test_rate_adaptation_increasing_load(self): 
phase1_percentage = (sampled_phase1 / phase1_spans) * 100 phase2_percentage = (sampled_phase2 / phase2_spans) * 100 - self.assertLess(phase2_percentage, phase1_percentage, - "Sampling percentage should decrease under high load") - + self.assertLess(phase2_percentage, phase1_percentage, "Sampling percentage should decrease under high load") + # Test sampler instantiation with various target rates and description format def test_sampler_creation(self): for target_rate in [0.1, 0.5, 1.0, 5.0, 100.0]: sampler = RateLimitedSampler(target_rate) self.assertIsInstance(sampler, RateLimitedSampler) - self.assertEqual( - sampler.get_description(), - f"RateLimitedSampler{{{target_rate}}}" - ) - + self.assertEqual(sampler.get_description(), f"RateLimitedSampler{{{target_rate}}}") + # Test that negative target rates raise a ValueError def test_negative_rate_raises_error(self): with self.assertRaises(ValueError): RateLimitedSampler(-1.0) - + # Test sampling behavior with zero target rate def test_zero_rate_sampling(self): sampler = RateLimitedSampler(0.0) - + for i in range(100): - result = sampler.should_sample( - parent_context=None, - trace_id=i, - name="test-span" - ) - + result = sampler.should_sample(parent_context=None, trace_id=i, name="test-span") + self.assertIsInstance(result, SamplingResult) self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) self.assertIn(_SAMPLE_RATE_KEY, result.attributes) - + # Test that the same trace ID produces consistent sampling decisions def test_sampling_decision_consistency(self): sampler = RateLimitedSampler(50.0) - + trace_id = 12345 - + results = [] for _ in range(10): - result = sampler.should_sample( - parent_context=None, - trace_id=trace_id, - name="test-span" - ) + result = sampler.should_sample(parent_context=None, trace_id=trace_id, name="test-span") results.append(result) - + first_decision = results[0].decision for result in results[1:]: - self.assertEqual(result.decision, first_decision, - "Sampling 
decision should be consistent for same trace ID") - + self.assertEqual( + result.decision, first_decision, "Sampling decision should be consistent for same trace ID" + ) + # Test that sampling results include valid sample rate attributes def test_sampling_attributes(self): sampler = RateLimitedSampler(25.0) - - result = sampler.should_sample( - parent_context=None, - trace_id=123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=None, trace_id=123, name="test-span") + self.assertIsInstance(result, SamplingResult) - + # Check if _SAMPLE_RATE_KEY is present only when sampling percentage is not 100% sampling_percentage = sampler._sampling_percentage_generator.get() if sampling_percentage != 100.0: @@ -249,30 +231,26 @@ def test_sampling_attributes(self): if isinstance(sample_rate, (int, float)): self.assertGreaterEqual(float(sample_rate), 0.0) self.assertLessEqual(float(sample_rate), 100.0) - + # Test sampling behavior with edge case trace ID values def test_sampler_with_extreme_trace_ids(self): sampler = RateLimitedSampler(1.0) - + extreme_trace_ids = [ 0, 1, 2**32 - 1, 2**64 - 1, - 0xabcdef123456789, + 0xABCDEF123456789, ] - + for trace_id in extreme_trace_ids: with self.subTest(trace_id=trace_id): - result = sampler.should_sample( - parent_context=None, - trace_id=trace_id, - name="test-span" - ) - + result = sampler.should_sample(parent_context=None, trace_id=trace_id, name="test-span") + self.assertIsInstance(result, SamplingResult) self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) - + # Check if _SAMPLE_RATE_KEY is present only when sampling percentage is not 100% sampling_percentage = sampler._sampling_percentage_generator.get() if sampling_percentage != 100.0: @@ -282,13 +260,13 @@ def test_sampler_with_extreme_trace_ids(self): if isinstance(sample_rate, (int, float)): self.assertGreaterEqual(float(sample_rate), 0.0) self.assertLessEqual(float(sample_rate), 100.0) - + # Test that sampler is thread-safe under 
concurrent access def test_thread_safety(self): sampler = RateLimitedSampler(10.0) results = [] errors = [] - + def worker(): try: for i in range(50): @@ -297,15 +275,15 @@ def worker(): time.sleep(0.001) except Exception as e: errors.append(e) - + threads = [threading.Thread(target=worker) for _ in range(5)] - + for thread in threads: thread.start() - + for thread in threads: thread.join() - + self.assertEqual(len(errors), 0, f"Thread safety errors: {errors}") self.assertGreater(len(results), 0) for result in results: @@ -320,101 +298,93 @@ def worker(): # Test inheriting sampling decision from sampled parent span with sample rate def test_parent_span_sampled_with_sample_rate(self): sampler = RateLimitedSampler(10.0) - + parent_span = create_parent_span(sampled=True, sample_rate=75.0, is_remote=False) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - - result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, trace_id=0xABC123, name="test-span") + self.assertEqual(result.decision, Decision.RECORD_AND_SAMPLE) self.assertEqual(result.attributes[_SAMPLE_RATE_KEY], 75.0) # Test inheriting sampling decision from non-sampled parent span with sample rate def test_parent_span_not_sampled_with_sample_rate(self): sampler = RateLimitedSampler(10.0) - + parent_span = create_parent_span(sampled=False, sample_rate=25.0, is_remote=False) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - - result = sampler.should_sample( - parent_context=context, - 
trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, trace_id=0xABC123, name="test-span") + self.assertEqual(result.decision, Decision.DROP) self.assertEqual(result.attributes[_SAMPLE_RATE_KEY], 0.0) # Test parent span with 100% sample rate maintains decision def test_parent_span_sampled_with_100_percent_sample_rate(self): sampler = RateLimitedSampler(5.0) - + parent_span = create_parent_span(sampled=True, sample_rate=100.0, is_remote=False) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - - result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, trace_id=0xABC123, name="test-span") + self.assertEqual(result.decision, Decision.RECORD_AND_SAMPLE) self.assertEqual(result.attributes[_SAMPLE_RATE_KEY], 100.0) # Test that remote parent spans are ignored for sampling decisions def test_parent_span_remote_ignored(self): sampler = RateLimitedSampler(5.0) - + parent_span = create_parent_span(sampled=True, sample_rate=80.0, is_remote=True) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - + from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) - + self.advance_time(100_000_000) - - result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, 
trace_id=0xABC123, name="test-span") + self.assertNotEqual(result.attributes[_SAMPLE_RATE_KEY], 80.0) # Test parent span without sample rate attribute uses local sampling def test_parent_span_no_sample_rate_attribute(self): sampler = RateLimitedSampler(5.0) - + parent_span = create_parent_span(sampled=True, sample_rate=None, is_remote=False) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - + from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) - + self.advance_time(100_000_000) - - result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, trace_id=0xABC123, name="test-span") + self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) sample_rate = result.attributes[_SAMPLE_RATE_KEY] self.assertIsInstance(sample_rate, (int, float)) @@ -422,28 +392,27 @@ def test_parent_span_no_sample_rate_attribute(self): # Test handling parent span with invalid span context def test_parent_span_invalid_context(self): sampler = RateLimitedSampler(5.0) - + parent_span = Mock() invalid_context = Mock() invalid_context.is_valid = False invalid_context.is_remote = False parent_span.get_span_context.return_value = invalid_context - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - + from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = 
self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) - + self.advance_time(100_000_000) - - result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=context, trace_id=0xABC123, name="test-span") + self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) sample_rate = result.attributes[_SAMPLE_RATE_KEY] self.assertIsInstance(sample_rate, (int, float)) @@ -451,19 +420,16 @@ def test_parent_span_invalid_context(self): # Test sampling behavior when no parent context is provided def test_no_parent_context_uses_local_sampling(self): sampler = RateLimitedSampler(5.0) - + from azure.monitor.opentelemetry.exporter.export.trace._rate_limited_sampling import _State + initial_time = self.current_time sampler._sampling_percentage_generator._state = _State(0.0, 0.0, initial_time) - + self.advance_time(100_000_000) - - result = sampler.should_sample( - parent_context=None, - trace_id=0xabc123, - name="test-span" - ) - + + result = sampler.should_sample(parent_context=None, trace_id=0xABC123, name="test-span") + self.assertIn(result.decision, [Decision.RECORD_AND_SAMPLE, Decision.DROP]) sample_rate = result.attributes[_SAMPLE_RATE_KEY] self.assertIsInstance(sample_rate, (int, float)) @@ -471,55 +437,52 @@ def test_no_parent_context_uses_local_sampling(self): # Test that original span attributes are preserved in sampling result def test_parent_context_preserves_original_attributes(self): sampler = RateLimitedSampler(10.0) - + parent_span = create_parent_span(sampled=True, sample_rate=50.0, is_remote=False) - - with patch('azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span', return_value=parent_span): + + with patch( + "azure.monitor.opentelemetry.exporter.export.trace._utils.get_current_span", return_value=parent_span + ): context = Mock() - - original_attributes = { - "service.name": "test-service", - 
"operation.name": "test-operation" - } - + + original_attributes = {"service.name": "test-service", "operation.name": "test-operation"} + result = sampler.should_sample( - parent_context=context, - trace_id=0xabc123, - name="test-span", - attributes=original_attributes + parent_context=context, trace_id=0xABC123, name="test-span", attributes=original_attributes ) - + self.assertEqual(result.decision, Decision.RECORD_AND_SAMPLE) self.assertEqual(result.attributes["service.name"], "test-service") self.assertEqual(result.attributes["operation.name"], "test-operation") self.assertEqual(result.attributes[_SAMPLE_RATE_KEY], 50.0) + class TestUtilityFunctions(unittest.TestCase): - + # Test that DJB2 hash produces consistent results for the same input def test_djb2_hash_consistency(self): from azure.monitor.opentelemetry.exporter.export.trace._utils import _get_DJB2_sample_score - + trace_id = "test-trace-id-12345" scores = [_get_DJB2_sample_score(trace_id) for _ in range(10)] self.assertTrue(all(score == scores[0] for score in scores)) - + # Test DJB2 hash function with edge case inputs def test_djb2_hash_edge_cases(self): from azure.monitor.opentelemetry.exporter.export.trace._utils import _get_DJB2_sample_score - + edge_cases = [ "", "0", "a" * 1000, "0123456789abcdef" * 8, ] - + for trace_id in edge_cases: with self.subTest(trace_id=trace_id): score = _get_DJB2_sample_score(trace_id) self.assertIsInstance(score, float) self.assertGreaterEqual(score, 0) - self.assertLess(score, 100) \ No newline at end of file + self.assertLess(score, 100) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py index 77aeee0127c0..c09e1655e5ec 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace.py @@ -6,6 +6,7 @@ import platform import shutil import unittest +from 
datetime import datetime from unittest import mock # pylint: disable=import-error @@ -45,7 +46,7 @@ _AZURE_SDK_OPENTELEMETRY_NAME, _AZURE_AI_SDK_NAME, ) -from azure.monitor.opentelemetry.exporter._generated.models import ContextTagKeys +from azure.monitor.opentelemetry.exporter._generated.exporter.models import ContextTagKeys from azure.monitor.opentelemetry.exporter._utils import azure_monitor_context @@ -372,7 +373,7 @@ def test_span_to_envelope_client_http(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "GET /wiki/Rabbit") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -452,10 +453,7 @@ def test_span_to_envelope_client_http(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.data.base_data.target, "www.example.com") - span._attributes = { - "http.request.method": "GET", - "gen_ai.system": "az.ai.inference" - } + span._attributes = {"http.request.method": "GET", "gen_ai.system": "az.ai.inference"} envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.data.base_data.target, "az.ai.inference") self.assertEqual(envelope.data.base_data.name, "GET /") @@ -465,7 +463,7 @@ def test_span_to_envelope_client_http(self): "server.address": "www.example.com", "server.port": 80, "url.scheme": "http", - "gen_ai.system": "az.ai.inference" + "gen_ai.system": "az.ai.inference", } envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.data.base_data.target, "www.example.com") @@ -578,7 +576,7 @@ def test_span_to_envelope_client_db(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, 
"Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -697,7 +695,7 @@ def test_span_to_envelope_client_rpc(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -743,7 +741,7 @@ def test_span_to_envelope_client_messaging(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -787,7 +785,7 @@ def test_span_to_envelope_client_gen_ai(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") 
self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -799,7 +797,7 @@ def test_span_to_envelope_client_gen_ai(self): self.assertEqual(envelope.data.base_data.type, "GenAI | az.ai.inference") self.assertEqual(envelope.data.base_data.target, "az.ai.inference") self.assertEqual(len(envelope.data.base_data.properties), 1) - + def test_span_to_envelope_client_internal_gen_ai_type(self): exporter = self._exporter start_time = 1575494316027613500 @@ -822,7 +820,7 @@ def test_span_to_envelope_client_internal_gen_ai_type(self): span._status = Status(status_code=StatusCode.UNSET) envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.data.base_data.type, "GenAI | az.ai.inference") - + def test_span_to_envelope_client_multiple_types_with_gen_ai(self): exporter = self._exporter start_time = 1575494316027613500 @@ -877,7 +875,7 @@ def test_span_to_envelope_client_azure(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -922,7 +920,7 @@ def test_span_to_envelope_producer_messaging(self): envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -978,7 +976,7 @@ def test_span_to_envelope_internal(self): 
envelope = exporter._span_to_envelope(span) self.assertEqual(envelope.name, "Microsoft.ApplicationInsights.RemoteDependency") - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(envelope.data.base_data.name, "test") self.assertEqual(envelope.data.base_data.id, "a6f5d48acb4d31d9") self.assertEqual(envelope.data.base_data.duration, "0.00:00:01.001") @@ -1548,7 +1546,7 @@ def test_span_events_to_envelopes_exception(self): self.assertEqual( envelope.tags.get(ContextTagKeys.AI_OPERATION_PARENT_ID), "{:016x}".format(span.context.span_id) ) - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(len(envelope.data.base_data.properties), 0) self.assertEqual(len(envelope.data.base_data.exceptions), 1) self.assertEqual(envelope.data.base_data.exceptions[0].type_name, "ZeroDivisionError") @@ -1606,7 +1604,7 @@ def test_span_events_to_envelopes_message(self): self.assertEqual( envelope.tags.get(ContextTagKeys.AI_OPERATION_PARENT_ID), "{:016x}".format(span.context.span_id) ) - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(len(envelope.data.base_data.properties), 1) self.assertEqual(envelope.data.base_data.properties["test"], "asd") self.assertEqual(envelope.data.base_data.message, "test event") @@ -1666,7 +1664,7 @@ def test_span_events_to_envelopes_sample_rate(self): self.assertEqual( envelope.tags.get(ContextTagKeys.AI_OPERATION_PARENT_ID), "{:016x}".format(span.context.span_id) ) - self.assertEqual(envelope.time, "2019-12-04T21:18:36.027613Z") + self.assertEqual(envelope.time, datetime.fromisoformat("2019-12-04T21:18:36.027613+00:00")) self.assertEqual(len(envelope.data.base_data.properties), 1) 
self.assertEqual(envelope.data.base_data.properties["test"], "asd") self.assertEqual(envelope.data.base_data.message, "test event") diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace_utils.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace_utils.py index e6ff96dee377..9fffe9089d21 100644 --- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace_utils.py +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/trace/test_trace_utils.py @@ -7,6 +7,7 @@ from azure.monitor.opentelemetry.exporter.export.trace._utils import ( _get_DJB2_sample_score, ) + # fixedint was removed as a source dependency. It is used as a dev requirement to test sample score from fixedint import Int32 from azure.monitor.opentelemetry.exporter._constants import ( @@ -32,7 +33,7 @@ def test_single_character(self): result = _get_DJB2_sample_score("a") # hash = ((5381 << 5) + 5381) + ord('a') # hash = (172192 + 5381) + 97 = 177670 - expected_hash = ((5381 << 5) + 5381) + ord('a') + expected_hash = ((5381 << 5) + 5381) + ord("a") expected = float(expected_hash) / _INT32_MAX self.assertEqual(result, expected) @@ -40,17 +41,17 @@ def test_typical_trace_id(self): """Test with typical 32-character trace ID.""" trace_id = "12345678901234567890123456789012" result = _get_DJB2_sample_score(trace_id) - + # Manually calculate expected result hash_value = Int32(_SAMPLING_HASH) for char in trace_id: hash_value = ((hash_value << 5) + hash_value) + ord(char) - + if hash_value == _INT32_MIN: hash_value = int(_INT32_MAX) else: hash_value = abs(hash_value) - + expected = float(hash_value) / _INT32_MAX self.assertEqual(result, expected) @@ -61,9 +62,9 @@ def test_hex_characters(self): "fedcba9876543210", "aaaaaaaaaaaaaaaa", "0000000000000000", - "ffffffffffffffff" + "ffffffffffffffff", ] - + for trace_id in test_cases: with self.subTest(trace_id=trace_id): result = _get_DJB2_sample_score(trace_id) @@ -76,36 +77,35 @@ def 
test_int32_overflow_handling(self): # Create a string that should cause overflow long_string = "f" * 100 # 100 'f' characters should cause overflow result = _get_DJB2_sample_score(long_string) - + self.assertIsInstance(result, float) self.assertGreaterEqual(result, 0.0) self.assertLessEqual(result, 1.0) def test_int32_minimum_value_handling(self): """Test handling when hash equals INTEGER_MIN.""" - # This is tricky to test directly since we need to find a string + # This is tricky to test directly since we need to find a string # that results in exactly _INT32_MIN. Instead, let's test the logic. - + # We'll use a mock to simulate this condition - def mock_djb2_with_min_value(trace_id_hex): # Call original to get the structure, then simulate _INT32_MIN case hash_value = Int32(_SAMPLING_HASH) for char in trace_id_hex: hash_value = ((hash_value << 5) + hash_value) + ord(char) - + # Simulate the case where we get _INT32_MIN if str(trace_id_hex) == "test_min": hash_value = Int32(_INT32_MIN) - + if hash_value == _INT32_MIN: hash_value = int(_INT32_MAX) else: hash_value = abs(hash_value) - + return float(hash_value) / _INT32_MAX - + # Test the _INT32_MIN case result = mock_djb2_with_min_value("test_min") expected = float(_INT32_MAX) / _INT32_MAX @@ -116,7 +116,7 @@ def test_negative_hash_conversion(self): # Find a string that produces a negative hash test_string = "negative_test_case_string" result = _get_DJB2_sample_score(test_string) - + # Result should always be positive (between 0 and 1) self.assertGreaterEqual(result, 0.0) self.assertLessEqual(result, 1.0) @@ -124,10 +124,10 @@ def test_negative_hash_conversion(self): def test_deterministic_output(self): """Test that same input always produces same output.""" trace_id = "abcdef1234567890abcdef1234567890" - + # Call multiple times with same input results = [_get_DJB2_sample_score(trace_id) for _ in range(5)] - + # All results should be identical self.assertTrue(all(r == results[0] for r in results)) @@ -139,9 +139,9 
@@ def test_different_inputs_different_outputs(self): "22345678901234567890123456789012", # First digit different "abcdef1234567890fedcba0987654321", # Completely different ] - + results = [_get_DJB2_sample_score(tid) for tid in trace_ids] - + # All results should be different self.assertEqual(len(results), len(set(results))) @@ -153,7 +153,7 @@ def test_boundary_values(self): "00000000000000000000000000000000", # All zeros "ffffffffffffffffffffffffffffffff", # All f's (32 chars) ] - + for trace_id in test_cases: with self.subTest(trace_id=trace_id): result = _get_DJB2_sample_score(trace_id) @@ -165,7 +165,7 @@ def test_constants_used_correctly(self): """Test that the function uses the expected constants.""" # Verify that _SAMPLING_HASH is 5381 (standard DJB2 hash initial value) self.assertEqual(_SAMPLING_HASH, 5381) - + # Verify Int32 constants self.assertEqual(_INT32_MAX, 2147483647) # 2^31 - 1 self.assertEqual(_INT32_MIN, -2147483648) # -2^31 @@ -174,23 +174,23 @@ def test_algorithm_correctness(self): """Test that the DJB2 algorithm is implemented correctly.""" # Test with known input and manually calculated expected output trace_id = "abc" - + # Manual calculation: # Start with 5381 # For 'a' (97): hash = ((5381 << 5) + 5381) + 97 = 177670 # For 'b' (98): hash = ((177670 << 5) + 177670) + 98 = 5823168 # For 'c' (99): hash = ((5823168 << 5) + 5823168) + 99 = 191582563 - + expected_hash = _SAMPLING_HASH for char in trace_id: expected_hash = Int32(((expected_hash << 5) + expected_hash) + ord(char)) - + if expected_hash == _INT32_MIN: expected_hash = int(_INT32_MAX) else: expected_hash = abs(expected_hash) - + expected_result = float(expected_hash) / _INT32_MAX actual_result = _get_DJB2_sample_score(trace_id) - + self.assertEqual(actual_result, expected_result) diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tsp-location.yaml b/sdk/monitor/azure-monitor-opentelemetry-exporter/tsp-location.yaml new file mode 100644 index 000000000000..7aa546a0424f --- 
/dev/null +++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tsp-location.yaml @@ -0,0 +1,2 @@ +batch: + - azure/monitor/opentelemetry/exporter/_generated