Skip to content

Commit ba727b3

Browse files
fix(google-genai): Remove agent spans for simple requests
1 parent 8f80fa7 commit ba727b3

File tree

2 files changed

+46
-137
lines changed

2 files changed

+46
-137
lines changed

sentry_sdk/integrations/google_genai/__init__.py

Lines changed: 37 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -73,17 +73,6 @@ def new_generate_content_stream(
7373

7474
_model, contents, model_name = prepare_generate_content_args(args, kwargs)
7575

76-
span = get_start_span_function()(
77-
op=OP.GEN_AI_INVOKE_AGENT,
78-
name="invoke_agent",
79-
origin=ORIGIN,
80-
)
81-
span.__enter__()
82-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
83-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
84-
set_span_data_for_request(span, integration, model_name, contents, kwargs)
85-
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
86-
8776
chat_span = sentry_sdk.start_span(
8877
op=OP.GEN_AI_CHAT,
8978
name=f"chat {model_name}",
@@ -118,18 +107,13 @@ def new_iterator() -> "Iterator[Any]":
118107
set_span_data_for_streaming_response(
119108
chat_span, integration, accumulated_response
120109
)
121-
set_span_data_for_streaming_response(
122-
span, integration, accumulated_response
123-
)
124110
chat_span.__exit__(None, None, None)
125-
span.__exit__(None, None, None)
126111

127112
return new_iterator()
128113

129114
except Exception as exc:
130115
_capture_exception(exc)
131116
chat_span.__exit__(None, None, None)
132-
span.__exit__(None, None, None)
133117
raise
134118

135119
return new_generate_content_stream
@@ -148,17 +132,6 @@ async def new_async_generate_content_stream(
148132

149133
_model, contents, model_name = prepare_generate_content_args(args, kwargs)
150134

151-
span = get_start_span_function()(
152-
op=OP.GEN_AI_INVOKE_AGENT,
153-
name="invoke_agent",
154-
origin=ORIGIN,
155-
)
156-
span.__enter__()
157-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
158-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
159-
set_span_data_for_request(span, integration, model_name, contents, kwargs)
160-
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
161-
162135
chat_span = sentry_sdk.start_span(
163136
op=OP.GEN_AI_CHAT,
164137
name=f"chat {model_name}",
@@ -193,18 +166,13 @@ async def new_async_iterator() -> "AsyncIterator[Any]":
193166
set_span_data_for_streaming_response(
194167
chat_span, integration, accumulated_response
195168
)
196-
set_span_data_for_streaming_response(
197-
span, integration, accumulated_response
198-
)
199169
chat_span.__exit__(None, None, None)
200-
span.__exit__(None, None, None)
201170

202171
return new_async_iterator()
203172

204173
except Exception as exc:
205174
_capture_exception(exc)
206175
chat_span.__exit__(None, None, None)
207-
span.__exit__(None, None, None)
208176
raise
209177

210178
return new_async_generate_content_stream
@@ -219,39 +187,29 @@ def new_generate_content(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
219187

220188
model, contents, model_name = prepare_generate_content_args(args, kwargs)
221189

222-
with get_start_span_function()(
223-
op=OP.GEN_AI_INVOKE_AGENT,
224-
name="invoke_agent",
190+
with sentry_sdk.start_span(
191+
op=OP.GEN_AI_CHAT,
192+
name=f"chat {model_name}",
225193
origin=ORIGIN,
226-
) as span:
227-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
228-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
229-
set_span_data_for_request(span, integration, model_name, contents, kwargs)
230-
231-
with sentry_sdk.start_span(
232-
op=OP.GEN_AI_CHAT,
233-
name=f"chat {model_name}",
234-
origin=ORIGIN,
235-
) as chat_span:
236-
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
237-
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
238-
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
239-
chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
240-
set_span_data_for_request(
241-
chat_span, integration, model_name, contents, kwargs
242-
)
194+
) as chat_span:
195+
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
196+
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
197+
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
198+
chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
199+
set_span_data_for_request(
200+
chat_span, integration, model_name, contents, kwargs
201+
)
243202

244-
try:
245-
response = f(self, *args, **kwargs)
246-
except Exception as exc:
247-
_capture_exception(exc)
248-
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
249-
raise
203+
try:
204+
response = f(self, *args, **kwargs)
205+
except Exception as exc:
206+
_capture_exception(exc)
207+
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
208+
raise
250209

251-
set_span_data_for_response(chat_span, integration, response)
252-
set_span_data_for_response(span, integration, response)
210+
set_span_data_for_response(chat_span, integration, response)
253211

254-
return response
212+
return response
255213

256214
return new_generate_content
257215

@@ -267,37 +225,27 @@ async def new_async_generate_content(
267225

268226
model, contents, model_name = prepare_generate_content_args(args, kwargs)
269227

270-
with get_start_span_function()(
271-
op=OP.GEN_AI_INVOKE_AGENT,
272-
name="invoke_agent",
228+
with sentry_sdk.start_span(
229+
op=OP.GEN_AI_CHAT,
230+
name=f"chat {model_name}",
273231
origin=ORIGIN,
274-
) as span:
275-
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
276-
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
277-
set_span_data_for_request(span, integration, model_name, contents, kwargs)
278-
279-
with sentry_sdk.start_span(
280-
op=OP.GEN_AI_CHAT,
281-
name=f"chat {model_name}",
282-
origin=ORIGIN,
283-
) as chat_span:
284-
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
285-
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
286-
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
287-
set_span_data_for_request(
288-
chat_span, integration, model_name, contents, kwargs
289-
)
290-
try:
291-
response = await f(self, *args, **kwargs)
292-
except Exception as exc:
293-
_capture_exception(exc)
294-
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
295-
raise
232+
) as chat_span:
233+
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
234+
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
235+
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
236+
set_span_data_for_request(
237+
chat_span, integration, model_name, contents, kwargs
238+
)
239+
try:
240+
response = await f(self, *args, **kwargs)
241+
except Exception as exc:
242+
_capture_exception(exc)
243+
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
244+
raise
296245

297-
set_span_data_for_response(chat_span, integration, response)
298-
set_span_data_for_response(span, integration, response)
246+
set_span_data_for_response(chat_span, integration, response)
299247

300-
return response
248+
return response
301249

302250
return new_async_generate_content
303251

tests/integrations/google_genai/test_google_genai.py

Lines changed: 9 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -152,17 +152,8 @@ def test_nonstreaming_generate_content(
152152
assert event["type"] == "transaction"
153153
assert event["transaction"] == "google_genai"
154154

155-
# Should have 2 spans: invoke_agent and chat
156-
assert len(event["spans"]) == 2
157-
invoke_span, chat_span = event["spans"]
158-
159-
# Check invoke_agent span
160-
assert invoke_span["op"] == OP.GEN_AI_INVOKE_AGENT
161-
assert invoke_span["description"] == "invoke_agent"
162-
assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash"
163-
assert invoke_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini"
164-
assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash"
165-
assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent"
155+
assert len(event["spans"]) == 1
156+
chat_span = event["spans"][0]
166157

167158
# Check chat span
168159
assert chat_span["op"] == OP.GEN_AI_CHAT
@@ -172,18 +163,12 @@ def test_nonstreaming_generate_content(
172163
assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash"
173164

174165
if send_default_pii and include_prompts:
175-
# Messages are serialized as JSON strings
176-
messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
177-
assert messages == [{"role": "user", "content": "Tell me a joke"}]
178-
179166
# Response text is stored as a JSON array
180167
response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
181168
# Parse the JSON array
182169
response_texts = json.loads(response_text)
183170
assert response_texts == ["Hello! How can I help you today?"]
184171
else:
185-
assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_span["data"]
186-
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span["data"]
187172
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["data"]
188173

189174
# Check token usage
@@ -194,10 +179,6 @@ def test_nonstreaming_generate_content(
194179
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5
195180
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3
196181

197-
# Check configuration parameters
198-
assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7
199-
assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
200-
201182

202183
@pytest.mark.parametrize("generate_content_config", (False, True))
203184
@pytest.mark.parametrize(
@@ -519,50 +500,30 @@ def test_streaming_generate_content(sentry_init, capture_events, mock_genai_clie
519500

520501
(event,) = events
521502

522-
# There should be 2 spans: invoke_agent and chat
523-
assert len(event["spans"]) == 2
524-
invoke_span = event["spans"][0]
525-
chat_span = event["spans"][1]
503+
assert len(event["spans"]) == 1
504+
chat_span = event["spans"][0]
526505

527506
# Check that streaming flag is set on the chat span
528-
assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
529507
assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
530508

531509
# Verify accumulated response text (all chunks combined)
532510
expected_full_text = "Hello! How can I help you today?"
533511
# Response text is stored as a JSON string
534512
chat_response_text = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT])
535-
invoke_response_text = json.loads(
536-
invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
537-
)
538513
assert chat_response_text == [expected_full_text]
539-
assert invoke_response_text == [expected_full_text]
540514

541515
# Verify finish reasons (only the final chunk has a finish reason)
542516
# When there's a single finish reason, it's stored as a plain string (not JSON)
543517
assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["data"]
544-
assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in invoke_span["data"]
545518
assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP"
546-
assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP"
547-
548519
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10
549-
assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10
550-
551520
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10
552-
assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10
553-
554521
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25
555-
assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25
556-
557522
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5
558-
assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5
559-
560523
assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3
561-
assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3
562524

563525
# Verify model name
564526
assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash"
565-
assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash"
566527

567528

568529
def test_span_origin(sentry_init, capture_events, mock_genai_client):
@@ -625,7 +586,7 @@ def test_response_without_usage_metadata(
625586
)
626587

627588
(event,) = events
628-
chat_span = event["spans"][1]
589+
chat_span = event["spans"][0]
629590

630591
# Usage data should not be present
631592
assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["data"]
@@ -679,7 +640,7 @@ def test_multiple_candidates(sentry_init, capture_events, mock_genai_client):
679640
)
680641

681642
(event,) = events
682-
chat_span = event["spans"][1]
643+
chat_span = event["spans"][0]
683644

684645
# Should capture all responses
685646
# Response text is stored as a JSON string when there are multiple responses
@@ -765,7 +726,7 @@ def test_empty_response(sentry_init, capture_events, mock_genai_client):
765726

766727
(event,) = events
767728
# Should still create a chat span even with empty candidates
768-
assert len(event["spans"]) == 2
729+
assert len(event["spans"]) == 1
769730

770731

771732
def test_response_with_different_id_fields(
@@ -804,7 +765,7 @@ def test_response_with_different_id_fields(
804765
)
805766

806767
(event,) = events
807-
chat_span = event["spans"][1]
768+
chat_span = event["spans"][0]
808769

809770
assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456"
810771
assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gemini-1.5-flash-001"
@@ -916,7 +877,7 @@ def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client):
916877
)
917878

918879
(event,) = events
919-
chat_span = event["spans"][1] # The chat span
880+
chat_span = event["spans"][0] # The chat span
920881

921882
# Check that tool calls are extracted and stored
922883
assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["data"]

0 commit comments

Comments
 (0)