Skip to content

Commit bc3d54a

Browse files
authored
feat: add server address on llm call metrics and tracers (#490)
* feat: add server_address on llm call traces and metrics
* feat: update code
* feat: use unknown instead of None
1 parent 3d8c814 commit bc3d54a

File tree

2 files changed

+37
-7
lines changed

2 files changed

+37
-7
lines changed

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 28 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
import json
1616

17+
from google.adk.agents.run_config import StreamingMode
1718
from veadk.tracing.telemetry.attributes.extractors.types import (
1819
ExtractorResponse,
1920
LLMAttributesParams,
@@ -507,13 +508,17 @@ def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
507508
for performance analysis and debugging purposes.
508509
509510
Args:
510-
params: LLM execution parameters (currently not implemented)
511+
params: LLM execution parameters
511512
512513
Returns:
513-
ExtractorResponse: Response containing None (not implemented)
514+
ExtractorResponse: Response containing whether the request uses streaming
514515
"""
515-
# return params.llm_request.stream
516-
return ExtractorResponse(content=None)
516+
is_streaming = bool(
517+
params.invocation_context.run_config
518+
and params.invocation_context.run_config.streaming_mode != StreamingMode.NONE
519+
)
520+
521+
return ExtractorResponse(content=is_streaming)
517522

518523

519524
def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
@@ -805,6 +810,24 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
805810
return ExtractorResponse(content=functions)
806811

807812

813+
def llm_server_address(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract the LLM server address (model API base URL).

    Returns the model API base URL configured on the current Agent.
    If the Agent or base URL is unavailable, returns 'unknown' to
    keep the span attribute consistent.

    Args:
        params: LLM execution parameters containing invocation context

    Returns:
        ExtractorResponse: Response containing the server address or 'unknown'
    """
    # getattr's default only covers a *missing* attribute; an Agent whose
    # model_api_base is explicitly None (or "") would otherwise leak a
    # non-'unknown' falsy value into the span attribute, contradicting the
    # docstring's contract. `or "unknown"` normalizes both cases.
    address = getattr(params.invocation_context.agent, "model_api_base", None)
    return ExtractorResponse(content=address or "unknown")
829+
830+
808831
LLM_ATTRIBUTES = {
809832
# -> 1. attributes
810833
# -> 1.1. request
@@ -813,6 +836,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
813836
"gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
814837
"gen_ai.request.temperature": llm_gen_ai_request_temperature,
815838
"gen_ai.request.top_p": llm_gen_ai_request_top_p,
839+
"server.address": llm_server_address,
816840
# CozeLoop required
817841
"gen_ai.request.functions": llm_gen_ai_request_functions,
818842
# -> 1.2. response

veadk/tracing/telemetry/exporters/apmplus_exporter.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
from typing import Any
1818

1919
from google.adk.agents.invocation_context import InvocationContext
20+
from google.adk.agents.run_config import StreamingMode
2021
from google.adk.events import Event
2122
from google.adk.models.llm_request import LlmRequest
2223
from google.adk.models.llm_response import LlmResponse
@@ -296,13 +297,18 @@ def record_call_llm(
296297
llm_request: Request object with model and parameter details
297298
llm_response: Response object with content and usage metadata
298299
"""
300+
is_streaming = bool(
301+
invocation_context.run_config
302+
and invocation_context.run_config.streaming_mode != StreamingMode.NONE
303+
)
304+
server_address = getattr(invocation_context.agent, "model_api_base", "unknown")
299305
attributes = {
300306
"gen_ai_system": "volcengine",
301307
"gen_ai_response_model": llm_request.model,
302308
"gen_ai_operation_name": "chat",
303309
"gen_ai_operation_type": "llm",
304-
"stream": "false",
305-
"server_address": "api.volcengine.com",
310+
"stream": is_streaming,
311+
"server_address": server_address,
306312
} # required by Volcengine APMPlus
307313

308314
if llm_response.usage_metadata:
@@ -337,7 +343,7 @@ def record_call_llm(
337343
if llm_response.error_code and self.chat_exception_counter:
338344
exception_attributes = {
339345
**attributes,
340-
"error_type": llm_response.error_message,
346+
"error_type": llm_response.error_code,
341347
}
342348
self.chat_exception_counter.add(1, exception_attributes)
343349

0 commit comments

Comments
 (0)