
 import json

+from google.adk.agents.run_config import StreamingMode
 from veadk.tracing.telemetry.attributes.extractors.types import (
     ExtractorResponse,
     LLMAttributesParams,
@@ -507,13 +508,17 @@ def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
     for performance analysis and debugging purposes.

     Args:
-        params: LLM execution parameters (currently not implemented)
+        params: LLM execution parameters containing the invocation context

     Returns:
-        ExtractorResponse: Response containing None (not implemented)
+        ExtractorResponse: Response containing True when streaming is enabled, otherwise False
514515 """
515- # return params.llm_request.stream
516- return ExtractorResponse (content = None )
516+ is_streaming = bool (
517+ params .invocation_context .run_config
518+ and params .invocation_context .run_config .streaming_mode != StreamingMode .NONE
519+ )
520+
521+ return ExtractorResponse (content = is_streaming )
517522
518523
519524def llm_gen_ai_operation_name (params : LLMAttributesParams ) -> ExtractorResponse :
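
For reference, the new condition can be exercised outside the tracer with stand-in objects. This is a minimal sketch rather than the project's test code: `SimpleNamespace` stands in for `LLMAttributesParams` and its `invocation_context`, and it assumes `RunConfig` is importable from the same `google.adk.agents.run_config` module as `StreamingMode`.

```python
from types import SimpleNamespace

from google.adk.agents.run_config import RunConfig, StreamingMode


def is_streaming(params) -> bool:
    # Same check as llm_gen_ai_is_streaming above: any mode other than
    # StreamingMode.NONE (with a run_config present) counts as streaming.
    return bool(
        params.invocation_context.run_config
        and params.invocation_context.run_config.streaming_mode != StreamingMode.NONE
    )


sse_params = SimpleNamespace(
    invocation_context=SimpleNamespace(run_config=RunConfig(streaming_mode=StreamingMode.SSE))
)
non_streaming_params = SimpleNamespace(
    invocation_context=SimpleNamespace(run_config=RunConfig(streaming_mode=StreamingMode.NONE))
)

print(is_streaming(sse_params))            # True
print(is_streaming(non_streaming_params))  # False
```
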
@@ -805,6 +810,24 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
     return ExtractorResponse(content=functions)


+def llm_server_address(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the LLM server address (model API base URL).
+
+    Returns the model API base URL configured on the current Agent.
+    If the Agent or base URL is unavailable, returns 'unknown' to
+    keep the span attribute consistent.
+
+    Args:
+        params: LLM execution parameters containing invocation context
+
+    Returns:
+        ExtractorResponse: Response containing the server address or 'unknown'
+    """
+    return ExtractorResponse(
+        content=getattr(params.invocation_context.agent, "model_api_base", None) or "unknown"
+    )
+
+
 LLM_ATTRIBUTES = {
     # -> 1. attributes
     # -> 1.1. request
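
The extractor's fallback reduces to a `getattr` plus an `or "unknown"` guard, so an `Agent` whose `model_api_base` is missing or `None` still produces a stable attribute value. A quick illustration with hypothetical stand-in objects (the URL is a placeholder, not a real endpoint):

```python
from types import SimpleNamespace

# Stand-ins for veadk Agents; the base URL below is a placeholder.
configured = SimpleNamespace(model_api_base="https://models.example.com/api/v3")
unconfigured = SimpleNamespace(model_api_base=None)

for agent in (configured, unconfigured):
    print(getattr(agent, "model_api_base", None) or "unknown")
# -> https://models.example.com/api/v3
# -> unknown
```
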
@@ -813,6 +836,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
813836 "gen_ai.request.max_tokens" : llm_gen_ai_request_max_tokens ,
814837 "gen_ai.request.temperature" : llm_gen_ai_request_temperature ,
815838 "gen_ai.request.top_p" : llm_gen_ai_request_top_p ,
839+ "server.address" : llm_server_address ,
816840 # CozeLoop required
817841 "gen_ai.request.functions" : llm_gen_ai_request_functions ,
818842 # -> 1.2. response
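
With the new registry entry, `server.address` is populated through the same lookup as the other keys. The tracer code that consumes `LLM_ATTRIBUTES` is not part of this diff, so the following is only an assumed sketch of such a consumer; it relies solely on the `ExtractorResponse.content` field visible above, and `apply_llm_attributes` is a hypothetical name.

```python
from typing import Any, Callable, Dict


def apply_llm_attributes(
    set_attribute: Callable[[str, Any], None],
    extractors: Dict[str, Callable[[Any], Any]],
    params: Any,
) -> None:
    # Each extractor returns an ExtractorResponse; unresolved attributes are skipped.
    for name, extractor in extractors.items():
        response = extractor(params)
        if response.content is not None:
            set_attribute(name, response.content)  # e.g. "server.address"
```
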