diff --git a/pyproject.toml b/pyproject.toml
index c3459d3f..28e699d3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "uipath-langchain"
-version = "0.5.74"
+version = "0.5.75"
 description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform"
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.11"
diff --git a/src/uipath_langchain/agent/react/init_node.py b/src/uipath_langchain/agent/react/init_node.py
index fee78798..088fccf2 100644
--- a/src/uipath_langchain/agent/react/init_node.py
+++ b/src/uipath_langchain/agent/react/init_node.py
@@ -51,11 +51,19 @@ def graph_state_init(state: Any) -> Any:
             )
         job_attachments_dict.update(message_attachments)
 
+    # Calculate initial message count for tracking new messages
+    initial_message_count = (
+        len(resolved_messages.value)
+        if isinstance(resolved_messages, Overwrite)
+        else len(resolved_messages)
+    )
+
     return {
         "messages": resolved_messages,
         "inner_state": {
             "job_attachments": job_attachments_dict,
             "agent_settings": agent_settings,
+            "initial_message_count": initial_message_count,
         },
     }
 
diff --git a/src/uipath_langchain/agent/react/terminate_node.py b/src/uipath_langchain/agent/react/terminate_node.py
index c7591a06..dfcba352 100644
--- a/src/uipath_langchain/agent/react/terminate_node.py
+++ b/src/uipath_langchain/agent/react/terminate_node.py
@@ -7,8 +7,10 @@
 from langchain_core.messages import AIMessage
 from pydantic import BaseModel
 from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
+from uipath.core.chat import UiPathConversationMessageData
 from uipath.runtime.errors import UiPathErrorCategory
 
+from ...runtime.messages import UiPathChatMessagesMapper
 from ..exceptions import AgentRuntimeError, AgentRuntimeErrorCode
 from .types import AgentGraphState
 
@@ -34,18 +36,65 @@ def _handle_raise_error(args: dict[str, Any]) -> NoReturn:
     )
 
 
+def _handle_end_conversational(
+    state: AgentGraphState, response_schema: type[BaseModel] | None
+) -> dict[str, Any]:
+    """Handle conversational agent termination by returning converted messages."""
+    if state.inner_state.initial_message_count is None:
+        raise AgentRuntimeError(
+            code=AgentRuntimeErrorCode.STATE_ERROR,
+            title="No initial message count in state for conversational agent execution.",
+            detail="Initial message count must be set in inner_state for conversational agent execution.",
+            category=UiPathErrorCategory.SYSTEM,
+        )
+
+    if response_schema is None:
+        raise AgentRuntimeError(
+            code=AgentRuntimeErrorCode.STATE_ERROR,
+            title="No response schema for conversational agent termination.",
+            detail="Response schema must be provided for termination of conversational agent execution.",
+            category=UiPathErrorCategory.SYSTEM,
+        )
+
+    initial_count = state.inner_state.initial_message_count
+    new_messages = state.messages[initial_count:]
+
+    converted_messages: list[UiPathConversationMessageData] = []
+
+    # For the agent-output messages, exclude tool results: include only the agent's LLM outputs and its tool calls with inputs.
+    # Evaluations don't check tool results, so the output should reflect the agent's own choices rather than tool outputs.
+    if new_messages:
+        converted_messages = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages=new_messages, include_tool_results=False
+            )
+        )
+
+    output = {
+        "uipath__agent_response_messages": [
+            msg.model_dump(by_alias=True) for msg in converted_messages
+        ]
+    }
+    validated = response_schema.model_validate(output)
+    return validated.model_dump(by_alias=True)
+
+
 def create_terminate_node(
-    response_schema: type[BaseModel] | None = None, is_conversational: bool = False
+    response_schema: type[BaseModel] | None = None,
+    is_conversational: bool = False,
 ):
     """Handles Agent Graph termination for multiple sources and output or error propagation to Orchestrator.
 
     Termination scenarios:
     1. LLM-initiated termination (END_EXECUTION_TOOL)
     2. LLM-initiated error (RAISE_ERROR_TOOL)
+    3. End of conversational loop
     """
 
     def terminate_node(state: AgentGraphState):
-        if not is_conversational:
+        if is_conversational:
+            return _handle_end_conversational(state, response_schema)
+        else:
             last_message = state.messages[-1]
             if not isinstance(last_message, AIMessage):
                 raise AgentRuntimeError(
diff --git a/src/uipath_langchain/agent/react/types.py b/src/uipath_langchain/agent/react/types.py
index 5ec0e27c..502ff62c 100644
--- a/src/uipath_langchain/agent/react/types.py
+++ b/src/uipath_langchain/agent/react/types.py
@@ -26,6 +26,7 @@ class AgentSettings(BaseModel):
 
 
 class InnerAgentGraphState(BaseModel):
     job_attachments: Annotated[dict[str, Attachment], merge_dicts] = {}
     agent_settings: AgentSettings | None = None
+    initial_message_count: int | None = None
     tools_storage: Annotated[dict[Hashable, Any], merge_dicts] = {}
diff --git a/src/uipath_langchain/runtime/messages.py b/src/uipath_langchain/runtime/messages.py
index 1164fcd0..229df615 100644
--- a/src/uipath_langchain/runtime/messages.py
+++ b/src/uipath_langchain/runtime/messages.py
@@ -8,6 +8,7 @@
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
+    AnyMessage,
     BaseMessage,
     ContentBlock,
     HumanMessage,
@@ -19,15 +20,19 @@
 from pydantic import ValidationError
 from uipath.core.chat import (
     UiPathConversationContentPartChunkEvent,
+    UiPathConversationContentPartData,
     UiPathConversationContentPartEndEvent,
     UiPathConversationContentPartEvent,
     UiPathConversationContentPartStartEvent,
     UiPathConversationMessage,
+    UiPathConversationMessageData,
     UiPathConversationMessageEndEvent,
     UiPathConversationMessageEvent,
     UiPathConversationMessageStartEvent,
+    UiPathConversationToolCallData,
     UiPathConversationToolCallEndEvent,
     UiPathConversationToolCallEvent,
+    UiPathConversationToolCallResult,
     UiPathConversationToolCallStartEvent,
     UiPathExternalValue,
     UiPathInlineValue,
@@ -55,7 +60,8 @@ def __init__(self, runtime_id: str, storage: UiPathRuntimeStorageProtocol | None
         self.seen_message_ids: set[str] = set()
         self._storage_lock = asyncio.Lock()
 
-    def _extract_text(self, content: Any) -> str:
+    @staticmethod
+    def _extract_text(content: Any) -> str:
         """Normalize LangGraph message.content to plain text."""
         if isinstance(content, str):
             return content
@@ -223,7 +229,7 @@ def _map_messages_internal(
                 AIMessage(
                     id=uipath_message.message_id,
                     # content_blocks=content_blocks,
-                    content=self._extract_text(content_blocks)
+                    content=UiPathChatMessagesMapper._extract_text(content_blocks)
                     if content_blocks
                     else "",
                     tool_calls=tool_calls,
@@ -544,5 +550,131 @@ def map_to_message_end_event(
             ),
         )
 
+    # Static methods for mapping langchain messages to uipath message types
+
+    @staticmethod
+    def map_langchain_messages_to_uipath_message_data_list(
+        messages: list[AnyMessage], include_tool_results: bool = True
+    ) -> list[UiPathConversationMessageData]:
+        """Convert LangChain messages to UiPathConversationMessageData format. include_tool_results controls whether to include tool call results from ToolMessage instances in the output agent-messages."""
+
+        # Build map of tool_call_id -> ToolMessage lookup, if tool-results should be included
+        tool_messages_map = (
+            UiPathChatMessagesMapper._build_langchain_tool_messages_map(messages)
+            if include_tool_results
+            else None
+        )
+
+        converted_messages: list[UiPathConversationMessageData] = []
+
+        for message in messages:
+            if isinstance(message, HumanMessage):
+                converted_messages.append(
+                    UiPathChatMessagesMapper._map_langchain_human_message_to_uipath_message_data(
+                        message
+                    )
+                )
+            elif isinstance(message, AIMessage):
+                converted_messages.append(
+                    UiPathChatMessagesMapper._map_langchain_ai_message_to_uipath_message_data(
+                        message, tool_messages_map
+                    )
+                )
+
+        return converted_messages
+
+    @staticmethod
+    def _build_langchain_tool_messages_map(
+        messages: list[AnyMessage],
+    ) -> dict[str, ToolMessage]:
+        """Create mapping of tool_call_id -> ToolMessage for efficient lookup."""
+        tool_map: dict[str, ToolMessage] = {}
+        for msg in messages:
+            if isinstance(msg, ToolMessage) and msg.tool_call_id:
+                tool_map[msg.tool_call_id] = msg
+        return tool_map
+
+    @staticmethod
+    def _parse_langchain_tool_result(content: Any) -> Any:
+        """Attempt to parse JSON result back to dict (reverse of json.dumps)."""
+        if not content or not isinstance(content, str):
+            return content
+
+        try:
+            return json.loads(content)
+        except (json.JSONDecodeError, TypeError):
+            # Not valid JSON, return as string
+            return content
+
+    @staticmethod
+    def _map_langchain_human_message_to_uipath_message_data(
+        message: HumanMessage,
+    ) -> UiPathConversationMessageData:
+        """Convert HumanMessage to UiPathConversationMessageData."""
+
+        text_content = UiPathChatMessagesMapper._extract_text(message.content)
+        content_parts: list[UiPathConversationContentPartData] = []
+        if text_content:
+            content_parts.append(
+                UiPathConversationContentPartData(
+                    mime_type="text/plain",
+                    data=UiPathInlineValue(inline=text_content),
+                    citations=[],
+                )
+            )
+
+        return UiPathConversationMessageData(
+            role="user", content_parts=content_parts, tool_calls=[], interrupts=[]
+        )
+
+    @staticmethod
+    def _map_langchain_ai_message_to_uipath_message_data(
+        message: AIMessage, tool_message_map: dict[str, ToolMessage] | None
+    ) -> UiPathConversationMessageData:
+        """Convert AIMessage to UiPathConversationMessageData with embedded tool-calls. When tool_message_map is passed in, tool results are matched by tool-call ID and included."""
+
+        content_parts: list[UiPathConversationContentPartData] = []
+        text_content = UiPathChatMessagesMapper._extract_text(message.content)
+        if text_content:
+            content_parts.append(
+                UiPathConversationContentPartData(
+                    mime_type="text/markdown",
+                    data=UiPathInlineValue(inline=text_content),
+                    citations=[],  # TODO: Citations
+                )
+            )
+
+        # Convert tool_calls
+        uipath_tool_calls: list[UiPathConversationToolCallData] = []
+        if message.tool_calls:
+            for tool_call in message.tool_calls:
+                uipath_tool_call = UiPathConversationToolCallData(
+                    name=tool_call["name"], input=tool_call.get("args", {})
+                )
+
+                if tool_message_map and tool_call["id"]:
+                    # Find corresponding ToolMessage and build tool-call result if found
+                    tool_message = tool_message_map.get(tool_call["id"])
+                    result = None
+                    if tool_message:
+                        # Parse JSON result back to dict
+                        output = UiPathChatMessagesMapper._parse_langchain_tool_result(
+                            tool_message.content
+                        )
+                        result = UiPathConversationToolCallResult(
+                            output=output,
+                            is_error=tool_message.status == "error",
+                        )
+                    uipath_tool_call.result = result
+
+                uipath_tool_calls.append(uipath_tool_call)
+
+        return UiPathConversationMessageData(
+            role="assistant",
+            content_parts=content_parts,
+            tool_calls=uipath_tool_calls,
+            interrupts=[],  # TODO: Interrupts
+        )
+
 
 __all__ = ["UiPathChatMessagesMapper"]
diff --git a/tests/agent/react/test_init_node.py b/tests/agent/react/test_init_node.py
index 1bf22ae4..b9c9919f 100644
--- a/tests/agent/react/test_init_node.py
+++ b/tests/agent/react/test_init_node.py
@@ -246,3 +246,37 @@ def test_conversational_merges_attachments_from_preserved_messages(self):
         assert attachment_id in job_attachments
         assert job_attachments[attachment_id].full_name == "document.pdf"
         assert job_attachments[attachment_id].mime_type == "application/pdf"
+
+    def test_initial_message_count_in_non_conversational_mode(self):
+        """Non-conversational mode should set initial_message_count."""
+        messages: list[SystemMessage | HumanMessage] = [
+            SystemMessage(content="System"),
+            HumanMessage(content="Query"),
+        ]
+        init_node = create_init_node(
+            messages, input_schema=None, is_conversational=False
+        )
+        state = MockState(messages=[])
+
+        result = init_node(state)
+
+        assert "initial_message_count" in result["inner_state"]
+        # In non-conversational mode, messages is a list
+        assert result["inner_state"]["initial_message_count"] == 2
+
+    def test_initial_message_count_in_conversational_mode(self):
+        """Conversational mode should set initial_message_count based on Overwrite."""
+        messages: list[SystemMessage | HumanMessage] = [
+            SystemMessage(content="System"),
+            HumanMessage(content="Query"),
+            HumanMessage(content="Query2"),
+        ]
+        init_node = create_init_node(
+            messages, input_schema=None, is_conversational=True
+        )
+        state = MockState(messages=[])
+
+        result = init_node(state)
+
+        assert "initial_message_count" in result["inner_state"]
+        assert result["inner_state"]["initial_message_count"] == 3
diff --git a/tests/agent/react/test_terminate_node.py b/tests/agent/react/test_terminate_node.py
index 7fa1213c..088f5d1b 100644
--- a/tests/agent/react/test_terminate_node.py
+++ b/tests/agent/react/test_terminate_node.py
@@ -3,9 +3,10 @@
 from typing import Any
 
 import pytest
-from langchain_core.messages import AIMessage, HumanMessage
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
 from pydantic import BaseModel
 from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
+from uipath.core.chat import UiPathConversationMessageData
 
 from uipath_langchain.agent.exceptions import (
     AgentRuntimeError,
@@ -18,6 +19,7 @@ class MockInnerState(BaseModel):
     """Mock inner state for testing."""
 
     job_attachments: dict[str, Any] = {}
+    initial_message_count: int | None = None
 
 
 class MockAgentGraphState(BaseModel):
@@ -30,44 +32,152 @@ class MockAgentGraphState(BaseModel):
 class TestTerminateNodeConversational:
     """Test cases for create_terminate_node with is_conversational=True."""
 
-    @pytest.fixture
-    def terminate_node(self):
-        """Fixture for conversational terminate node."""
-        return create_terminate_node(response_schema=None, is_conversational=True)
+    def test_conversational_requires_response_schema(self):
+        """Conversational mode should raise error if no response_schema provided."""
 
-    @pytest.fixture
-    def state_with_ai_message(self):
-        """Fixture for state with AI message (no tool calls)."""
-        return MockAgentGraphState(
-            messages=[AIMessage(content="Here is my response to your question.")]
+        terminate_node_no_schema = create_terminate_node(
+            response_schema=None, is_conversational=True
+        )
+        state = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="Initial message"),
+                AIMessage(content="Response"),
+            ],
+            inner_state=MockInnerState(initial_message_count=1),
         )
 
-    @pytest.fixture
-    def state_with_human_message(self):
-        """Fixture for state with human message as last."""
-        return MockAgentGraphState(messages=[HumanMessage(content="User message")])
+        with pytest.raises(AgentRuntimeError) as exc_info:
+            terminate_node_no_schema(state)
 
-    def test_conversational_returns_none_no_tool_calls(
-        self, terminate_node, state_with_ai_message
-    ):
-        """Conversational mode should return None when AI has no tool calls."""
-        result = terminate_node(state_with_ai_message)
+        assert "No response schema" in exc_info.value.error_info.title
 
-        assert result is None
+    def test_conversational_requires_initial_message_count(self):
+        """Conversational mode should raise error if initial_message_count not set."""
 
-    def test_conversational_skips_ai_message_validation(
-        self, terminate_node, state_with_human_message
-    ):
-        """Conversational mode should not validate that last message is AIMessage."""
-        # This should not raise, unlike non-conversational mode
-        result = terminate_node(state_with_human_message)
+        class ResponseSchema(BaseModel):
+            uipath__agent_response_messages: list[UiPathConversationMessageData]
+
+        terminate_node = create_terminate_node(
+            response_schema=ResponseSchema, is_conversational=True
+        )
+        state = MockAgentGraphState(
+            messages=[AIMessage(content="Response")],
+            inner_state=MockInnerState(initial_message_count=None),
+        )
+
+        with pytest.raises(AgentRuntimeError) as exc_info:
+            terminate_node(state)
+
+        assert "No initial message count" in exc_info.value.error_info.title
+
+    def test_conversational_returns_converted_messages(self):
+        """Conversational mode should return converted new messages."""
+
+        class ResponseSchema(BaseModel):
+            uipath__agent_response_messages: list[UiPathConversationMessageData]
+
+        terminate_node = create_terminate_node(
+            response_schema=ResponseSchema, is_conversational=True
+        )
+
+        # Create state with initial message count of 2, and 3 total messages
+        # So only the last message should be converted
+        state = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="Initial user message"),
+                AIMessage(content="Initial AI response"),
+                AIMessage(content="New AI response"),
+            ],
+            inner_state=MockInnerState(initial_message_count=2),
+        )
+
+        result = terminate_node(state)
 
-        assert result is None
+        assert "uipath__agent_response_messages" in result
+        messages = result["uipath__agent_response_messages"]
+
+        # Should have 1 message (only the new one after initial_message_count)
+        assert len(messages) == 1
+        assert messages[0]["role"] == "assistant"
+        assert len(messages[0]["contentParts"]) == 1
+        assert messages[0]["contentParts"][0]["mimeType"] == "text/markdown"
+        assert "New AI response" in str(messages[0]["contentParts"][0]["data"])
+
+    def test_conversational_handles_multiple_new_messages(self):
+        """Conversational mode should convert all messages after initial count."""
+
+        class ResponseSchema(BaseModel):
+            uipath__agent_response_messages: list[UiPathConversationMessageData]
+
+        terminate_node = create_terminate_node(
+            response_schema=ResponseSchema, is_conversational=True
+        )
+
+        # Initial count is 1, so messages at index 1+ are new
+        state = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="Initial message"),
+                AIMessage(content="First new response"),
+                AIMessage(content="Second new response"),
+            ],
+            inner_state=MockInnerState(initial_message_count=1),
+        )
+
+        result = terminate_node(state)
+
+        messages = result["uipath__agent_response_messages"]
+        assert len(messages) == 2
+        assert messages[0]["role"] == "assistant"
+        assert "First new response" in str(messages[0]["contentParts"][0]["data"])
+        assert messages[1]["role"] == "assistant"
+        assert "Second new response" in str(messages[1]["contentParts"][0]["data"])
+
+    def test_conversational_with_tool_calls_excludes_tool_results(self):
+        """Conversational mode should exclude tool results in output."""
+
+        class ResponseSchema(BaseModel):
+            uipath__agent_response_messages: list[UiPathConversationMessageData]
+
+        terminate_node = create_terminate_node(
+            response_schema=ResponseSchema, is_conversational=True
+        )
+
+        # Initial count is 1
+        state = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="Initial"),
+                AIMessage(
+                    content="Using tool",
+                    tool_calls=[
+                        {"name": "test_tool", "args": {"param": "value"}, "id": "call1"}
+                    ],
+                ),
+                ToolMessage(content="Tool result", tool_call_id="call1"),
+            ],
+            inner_state=MockInnerState(initial_message_count=1),
+        )
+
+        result = terminate_node(state)
+
+        messages = result["uipath__agent_response_messages"]
+        # Should have AI message with tool calls, but NOT the ToolMessage
+        # The mapper with include_tool_results=False should only return AI messages
+        assert len(messages) == 1
+        assert messages[0]["role"] == "assistant"
+        assert "Using tool" in str(messages[0]["contentParts"][0]["data"])
+        # Verify tool calls are present in the message
+        assert len(messages[0]["toolCalls"]) == 1
+        assert messages[0]["toolCalls"][0]["name"] == "test_tool"
+        assert messages[0]["toolCalls"][0]["input"] == {"param": "value"}
 
     def test_conversational_ignores_end_execution_tool(self):
         """Conversational mode should ignore END_EXECUTION tool calls."""
+
+        class ResponseSchema(BaseModel):
+            uipath__agent_response_messages: list[UiPathConversationMessageData]
+
         terminate_node = create_terminate_node(
-            response_schema=None, is_conversational=True
+            response_schema=ResponseSchema, is_conversational=True
         )
         ai_message = AIMessage(
             content="Done",
@@ -79,12 +191,19 @@
                 }
             ],
         )
-        state = MockAgentGraphState(messages=[ai_message])
+        state = MockAgentGraphState(
+            messages=[HumanMessage(content="Initial"), ai_message],
+            inner_state=MockInnerState(initial_message_count=1),
+        )
 
-        # Should return None, not process the tool call
+        # Should process normally, not treat as special
        result = terminate_node(state)
 
-        assert result is None
+        assert "uipath__agent_response_messages" in result
+        messages = result["uipath__agent_response_messages"]
+        assert len(messages) == 1
+        assert messages[0]["role"] == "assistant"
+        assert "Done" in str(messages[0]["contentParts"][0]["data"])
 
 
 class TestTerminateNodeNonConversational:
diff --git a/tests/runtime/chat_message_mapper.py b/tests/runtime/chat_message_mapper.py
index 554646b3..2f3c7f33 100644
--- a/tests/runtime/chat_message_mapper.py
+++ b/tests/runtime/chat_message_mapper.py
@@ -6,6 +6,7 @@
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
+    AnyMessage,
     HumanMessage,
     SystemMessage,
     ToolMessage,
@@ -1352,3 +1353,268 @@ async def test_map_event_emits_message_end_after_last_tool_result(self):
         assert len(result2) == 2  # Tool call end + message end
         assert result2[0].tool_call is not None
         assert result2[1].end is not None
+
+
+class TestMapLangChainMessagesToUiPathMessageData:
+    """Tests for map_langchain_messages_to_uipath_message_data_list static method."""
+
+    def test_converts_empty_messages_correctly(self):
+        """Should return empty list when input messages list is empty."""
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                []
+            )
+        )
+
+        assert result == []
+
+    def test_converts_human_message_to_user_role(self):
+        """Should convert HumanMessage to user role message."""
+        messages: list[AnyMessage] = [HumanMessage(content="Hello")]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages
+            )
+        )
+
+        assert len(result) == 1
+        assert result[0].role == "user"
+        assert len(result[0].content_parts) == 1
+        assert result[0].content_parts[0].mime_type == "text/plain"
+        assert isinstance(result[0].content_parts[0].data, UiPathInlineValue)
+        assert result[0].content_parts[0].data.inline == "Hello"
+
+    def test_converts_ai_message_to_assistant_role(self):
+        """Should convert AIMessage to assistant role message."""
+        messages: list[AnyMessage] = [AIMessage(content="Hi there")]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages
+            )
+        )
+
+        assert len(result) == 1
+        assert result[0].role == "assistant"
+        assert len(result[0].content_parts) == 1
+        assert result[0].content_parts[0].mime_type == "text/markdown"
+        assert isinstance(result[0].content_parts[0].data, UiPathInlineValue)
+        assert result[0].content_parts[0].data.inline == "Hi there"
+
+    def test_converts_ai_message_with_tool_calls(self):
+        """Should include tool calls in converted AI message."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Let me search",
+                tool_calls=[
+                    {"name": "search", "args": {"query": "test"}, "id": "call1"}
+                ],
+            )
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=False
+            )
+        )
+
+        assert len(result) == 1
+        assert result[0].role == "assistant"
+        assert len(result[0].tool_calls) == 1
+        assert result[0].tool_calls[0].name == "search"
+        assert result[0].tool_calls[0].input == {"query": "test"}
+
+    def test_includes_tool_results_when_enabled(self):
+        """Should include tool results in tool calls when include_tool_results=True."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Using tool",
+                tool_calls=[{"name": "test_tool", "args": {}, "id": "call1"}],
+            ),
+            ToolMessage(
+                content='{"status": "success"}', tool_call_id="call1", status="success"
+            ),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=True
+            )
+        )
+
+        assert len(result) == 1  # Only AI message, tool message merged in
+        assert result[0].role == "assistant"
+        assert len(result[0].tool_calls) == 1
+        assert result[0].tool_calls[0].result is not None
+        assert result[0].tool_calls[0].result.output == {"status": "success"}
+        assert result[0].tool_calls[0].result.is_error is False
+
+    def test_excludes_tool_results_when_disabled(self):
+        """Should exclude tool results when include_tool_results=False."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Using tool",
+                tool_calls=[{"name": "test_tool", "args": {}, "id": "call1"}],
+            ),
+            ToolMessage(
+                content='{"status": "success"}', tool_call_id="call1", status="success"
+            ),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=False
+            )
+        )
+
+        assert len(result) == 1
+        assert result[0].role == "assistant"
+        assert len(result[0].tool_calls) == 1
+        # Tool call should not have result when include_tool_results=False
+        assert result[0].tool_calls[0].result is None
+
+    def test_handles_tool_error_status(self):
+        """Should mark tool result as error when status is error."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Trying tool",
+                tool_calls=[{"name": "failing_tool", "args": {}, "id": "call1"}],
+            ),
+            ToolMessage(content="Error occurred", tool_call_id="call1", status="error"),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=True
+            )
+        )
+
+        assert len(result) == 1
+        assert result[0].tool_calls[0].result is not None
+        assert result[0].tool_calls[0].result.is_error is True
+        assert result[0].tool_calls[0].result.output == "Error occurred"
+
+    def test_parses_json_tool_results(self):
+        """Should parse JSON string results back to dict."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Using tool",
+                tool_calls=[{"name": "test_tool", "args": {}, "id": "call1"}],
+            ),
+            ToolMessage(
+                content='{"data": [1, 2, 3], "count": 3}', tool_call_id="call1"
+            ),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=True
+            )
+        )
+
+        assert result[0].tool_calls[0].result is not None
+        assert result[0].tool_calls[0].result.output == {"data": [1, 2, 3], "count": 3}
+
+    def test_keeps_non_json_tool_results_as_string(self):
+        """Should keep non-JSON results as strings."""
+        messages: list[AnyMessage] = [
+            AIMessage(
+                content="Using tool",
+                tool_calls=[{"name": "test_tool", "args": {}, "id": "call1"}],
+            ),
+            ToolMessage(content="plain text result", tool_call_id="call1"),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=True
+            )
+        )
+
+        assert result[0].tool_calls[0].result is not None
+        assert result[0].tool_calls[0].result.output == "plain text result"
+
+    def test_handles_mixed_message_types(self):
+        """Should handle conversation with mixed message types including tools."""
+        messages: list[AnyMessage] = [
+            HumanMessage(content="Hello"),
+            AIMessage(content="Hi there"),
+            HumanMessage(content="Search for data"),
+            AIMessage(
+                content="Let me search",
+                tool_calls=[
+                    {"name": "search_tool", "args": {"query": "data"}, "id": "call1"}
+                ],
+            ),
+            ToolMessage(
+                content='{"results": ["item1", "item2"]}', tool_call_id="call1"
+            ),
+            AIMessage(content="I found the data"),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages, include_tool_results=True
+            )
+        )
+
+        # Should skip ToolMessages, only convert Human and AI messages
+        assert len(result) == 5
+        assert result[0].role == "user"
+        assert result[1].role == "assistant"
+        assert result[2].role == "user"
+        assert result[3].role == "assistant"
+        assert len(result[3].tool_calls) == 1
+        assert result[3].tool_calls[0].result is not None
+        assert result[4].role == "assistant"
+
+    def test_handles_empty_message_list(self):
+        """Should return empty list for empty input."""
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                []
+            )
+        )
+
+        assert result == []
+
+    def test_handles_empty_content_messages(self):
+        """Should handle messages with empty content."""
+        messages: list[AnyMessage] = [
+            HumanMessage(content=""),
+            AIMessage(content=""),
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages
+            )
+        )
+
+        assert len(result) == 2
+        # Empty content should result in no text content-parts
+        assert len(result[0].content_parts) == 0
+        assert len(result[1].content_parts) == 0
+
+    def test_extracts_text_from_content_blocks(self):
+        """Should extract text from complex content block structures."""
+        messages: list[AnyMessage] = [
+            HumanMessage(
+                content=[
+                    {"type": "text", "text": "first part"},
+                    {"type": "text", "text": " second part"},
+                ]
+            )
+        ]
+
+        result = (
+            UiPathChatMessagesMapper.map_langchain_messages_to_uipath_message_data_list(
+                messages
+            )
+        )
+
+        assert len(result) == 1
+        assert len(result[0].content_parts) == 1
+        assert isinstance(result[0].content_parts[0].data, UiPathInlineValue)
+        assert result[0].content_parts[0].data.inline == "first part second part"
diff --git a/uv.lock b/uv.lock
index 2fe93f5f..02111f54 100644
--- a/uv.lock
+++ b/uv.lock
@@ -3323,7 +3323,7 @@ wheels = [
 
 [[package]]
 name = "uipath-langchain"
-version = "0.5.74"
+version = "0.5.75"
 source = { editable = "." }
 dependencies = [
     { name = "httpx" },