22 changes: 12 additions & 10 deletions src/strands/event_loop/streaming.py
@@ -288,16 +288,8 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
content.append({"toolUse": tool_use})
state["current_tool_use"] = {}

elif text:
if citations_content:
citations_block: CitationsContentBlock = {"citations": citations_content, "content": [{"text": text}]}
content.append({"citationsContent": citations_block})
state["citationsContent"] = []
else:
content.append({"text": text})
state["text"] = ""

elif reasoning_text:
# Handle reasoning content - checked independently of text so both can be captured
if reasoning_text:
content_block: ContentBlock = {
"reasoningContent": {
"reasoningText": {
@@ -315,6 +307,16 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
content.append({"reasoningContent": {"redactedContent": redacted_content}})
state["redactedContent"] = b""

# Handle text content - checked after reasoning so both can be captured in the same block
if text:
if citations_content:
citations_block: CitationsContentBlock = {"citations": citations_content, "content": [{"text": text}]}
content.append({"citationsContent": citations_block})
state["citationsContent"] = []
else:
content.append({"text": text})
state["text"] = ""

return state


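For context, a minimal sketch of the behavioral change (it mirrors the first new parametrized case in the unit tests below): a block-stop state carrying both reasoning text and regular text previously produced only the text block, because the old `elif text:` / `elif reasoning_text:` chain made the branches mutually exclusive; with the reordered checks the reasoning block is appended first, followed by the text block.

```python
# Minimal sketch of the new behavior; the state keys mirror the parametrized
# unit-test cases in tests/strands/event_loop/test_streaming.py below.
from strands.event_loop.streaming import handle_content_block_stop

state = {
    "content": [],
    "current_tool_use": {},
    "text": "2 + 2 = 4",
    "reasoningText": "Let me calculate this simple math problem.",
    "citationsContent": [],
    "redactedContent": b"",
}

state = handle_content_block_stop(state)

# Both blocks are now captured: reasoning first, then text.
assert state["content"] == [
    {"reasoningContent": {"reasoningText": {"text": "Let me calculate this simple math problem."}}},
    {"text": "2 + 2 = 4"},
]
```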
167 changes: 167 additions & 0 deletions tests/strands/event_loop/test_streaming.py
@@ -481,6 +481,90 @@ def test_handle_content_block_delta(event: ContentBlockDeltaEvent, event_type, s
"redactedContent": b"",
},
),
# Reasoning AND Text - both should be captured (Gemini thinking mode)
(
{
"content": [],
"current_tool_use": {},
"text": "2 + 2 = 4",
"reasoningText": "Let me calculate this simple math problem.",
"citationsContent": [],
"redactedContent": b"",
},
{
"content": [
{"reasoningContent": {"reasoningText": {"text": "Let me calculate this simple math problem."}}},
{"text": "2 + 2 = 4"},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"citationsContent": [],
"redactedContent": b"",
},
),
# Reasoning AND Text with signature - both should be captured
(
{
"content": [],
"current_tool_use": {},
"text": "The answer is 4",
"reasoningText": "Thinking about the math",
"signature": "test-sig",
"citationsContent": [],
"redactedContent": b"",
},
{
"content": [
{
"reasoningContent": {
"reasoningText": {"text": "Thinking about the math", "signature": "test-sig"}
}
},
{"text": "The answer is 4"},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"signature": "test-sig",
"citationsContent": [],
"redactedContent": b"",
},
),
# Reasoning AND Text with Citations - all should be captured
(
{
"content": [],
"current_tool_use": {},
"text": "According to the source",
"reasoningText": "I need to cite the source",
"citationsContent": [
{"location": {"documentChar": {"documentIndex": 0, "start": 0, "end": 10}}, "title": "Source"}
],
"redactedContent": b"",
},
{
"content": [
{"reasoningContent": {"reasoningText": {"text": "I need to cite the source"}}},
{
"citationsContent": {
"citations": [
{
"location": {"documentChar": {"documentIndex": 0, "start": 0, "end": 10}},
"title": "Source",
}
],
"content": [{"text": "According to the source"}],
}
},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"citationsContent": [],
"redactedContent": b"",
},
),
],
)
def test_handle_content_block_stop(state, exp_updated_state):
@@ -982,6 +1066,89 @@ def _get_message_from_event(event: ModelStopReason) -> Message:
return cast(Message, event["stop"][1])


@pytest.mark.asyncio
async def test_process_stream_reasoning_and_text_same_block(agenerator, alist):
response = [
{"messageStart": {"role": "assistant"}},
{"contentBlockStart": {"start": {}}},
{
"contentBlockDelta": {
"delta": {"reasoningContent": {"text": "Let me calculate this..."}},
"contentBlockIndex": 0,
}
},
{
"contentBlockDelta": {
"delta": {"text": "2 + 2 = 4"},
"contentBlockIndex": 0,
}
},
{"contentBlockStop": {"contentBlockIndex": 0}},
{"messageStop": {"stopReason": "end_turn"}},
{
"metadata": {
"usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30},
"metrics": {"latencyMs": 100},
}
},
]

stream = strands.event_loop.streaming.process_stream(agenerator(response))

last_event = cast(ModelStopReason, (await alist(stream))[-1])

message = _get_message_from_event(last_event)

assert len(message["content"]) == 2
assert message["content"][0]["reasoningContent"]["reasoningText"]["text"] == "Let me calculate this..."
assert message["content"][1]["text"] == "2 + 2 = 4"


@pytest.mark.asyncio
async def test_process_stream_reasoning_and_text_same_block_with_signature(agenerator, alist):
response = [
{"messageStart": {"role": "assistant"}},
{"contentBlockStart": {"start": {}}},
{
"contentBlockDelta": {
"delta": {"reasoningContent": {"text": "Thinking about this..."}},
"contentBlockIndex": 0,
}
},
{
"contentBlockDelta": {
"delta": {"reasoningContent": {"signature": "test-signature"}},
"contentBlockIndex": 0,
}
},
{
"contentBlockDelta": {
"delta": {"text": "The answer is 42"},
"contentBlockIndex": 0,
}
},
{"contentBlockStop": {"contentBlockIndex": 0}},
{"messageStop": {"stopReason": "end_turn"}},
{
"metadata": {
"usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30},
"metrics": {"latencyMs": 100},
}
},
]

stream = strands.event_loop.streaming.process_stream(agenerator(response))

last_event = cast(ModelStopReason, (await alist(stream))[-1])

message = _get_message_from_event(last_event)

assert len(message["content"]) == 2
assert message["content"][0]["reasoningContent"]["reasoningText"]["text"] == "Thinking about this..."
assert message["content"][0]["reasoningContent"]["reasoningText"]["signature"] == "test-signature"
assert message["content"][1]["text"] == "The answer is 42"


@pytest.mark.asyncio
async def test_process_stream_with_no_signature(agenerator, alist):
response = [
32 changes: 32 additions & 0 deletions tests_integ/models/test_model_gemini.py
@@ -202,3 +202,35 @@ def test_agent_with_gemini_code_execution_tool(gemini_tool_model):

result_turn2 = agent("Summarize that into a single number")
assert "5117" in str(result_turn2)


def test_agent_with_thinking_captures_reasoning_content():
model = GeminiModel(
client_args={"api_key": os.getenv("GOOGLE_API_KEY")},
model_id="gemini-2.5-flash",
params={
"thinking_config": {
"thinking_budget": 1024,
"include_thoughts": True,
},
},
)

agent = Agent(model=model)

agent("What is 2+2?")

last_message = agent.messages[-1]
content = last_message["content"]

has_reasoning = any("reasoningContent" in block for block in content)
has_text = any("text" in block for block in content)

assert has_text, "Text content should be present in the message"
assert has_reasoning, "Reasoning content should be captured when thinking mode is enabled"

reasoning_indices = [i for i, block in enumerate(content) if "reasoningContent" in block]
text_indices = [i for i, block in enumerate(content) if "text" in block]

if reasoning_indices and text_indices:
assert min(reasoning_indices) < min(text_indices), "Reasoning should appear before text content"
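
For reference, a hypothetical sketch of the message content the assertions above expect when thinking mode is enabled; the integration test only checks that both block types are present and that reasoning precedes text, so the literal strings here are illustrative, not asserted.

```python
# Hypothetical example of agent.messages[-1]["content"] after the call above.
expected_shape = [
    {"reasoningContent": {"reasoningText": {"text": "<model reasoning>"}}},
    {"text": "<final answer, e.g. '2 + 2 = 4'>"},
]
```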