diff --git a/pyproject.toml b/pyproject.toml index 3f85d7c7e..dbccd220a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "uipath" -version = "2.8.44" +version = "2.8.45" description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools." readme = { file = "README.md", content-type = "text/markdown" } requires-python = ">=3.11" diff --git a/src/uipath/_cli/_evals/_conversational_utils.py b/src/uipath/_cli/_evals/_conversational_utils.py new file mode 100644 index 000000000..70d878b71 --- /dev/null +++ b/src/uipath/_cli/_evals/_conversational_utils.py @@ -0,0 +1,289 @@ +import uuid +from datetime import datetime, timezone +from typing import Any, List, Literal + +from pydantic import BaseModel, Field +from uipath.core.chat import ( + UiPathConversationContentPart, + UiPathConversationContentPartData, + UiPathConversationMessage, + UiPathConversationMessageData, + UiPathConversationToolCall, + UiPathConversationToolCallData, + UiPathConversationToolCallResult, + UiPathInlineValue, +) + +# Types for legacy conversational-agent evaluation input/outputs. + + +class LegacyConversationalEvalJobAttachmentReference(BaseModel): + """File attachment reference in eval messages.""" + + id: str = Field(..., alias="ID") + full_name: str = Field(..., alias="FullName") + mime_type: str = Field(..., alias="MimeType") + + +class LegacyConversationalEvalOutputToolCall(BaseModel): + """Tool call in eval output schema (no result field).""" + + name: str + arguments: dict[str, Any] + + +class LegacyConversationalEvalInputToolCallResult(BaseModel): + """Tool call result in eval input schema.""" + + value: Any + is_error: bool | None = Field(default=None, alias="isError") + + +class LegacyConversationalEvalInputToolCall(LegacyConversationalEvalOutputToolCall): + """Tool call in eval input schema (extends output tool call with result).""" + + result: LegacyConversationalEvalInputToolCallResult + + +class LegacyConversationalEvalMessage(BaseModel): + """Base eval message type.""" + + role: Literal["agent", "user"] + text: str + + +class LegacyConversationalEvalUserMessage(LegacyConversationalEvalMessage): + """User message in eval schema.""" + + role: Literal["user"] = "user" + attachments: list[LegacyConversationalEvalJobAttachmentReference] | None = Field( + default=None + ) + + +class LegacyConversationalEvalInputAgentMessage(LegacyConversationalEvalMessage): + """Agent message in eval input schema (input tool-calls contain results field).""" + + role: Literal["agent"] = "agent" + tool_calls: list[LegacyConversationalEvalInputToolCall] | None = Field( + default=None, alias="toolCalls" + ) + + +class LegacyConversationalEvalOutputAgentMessage(LegacyConversationalEvalMessage): + """Agent message in eval output schema (output tool-calls don't contain result field).""" + + role: Literal["agent"] = "agent" + tool_calls: list[LegacyConversationalEvalOutputToolCall] | None = Field( + default=None, alias="toolCalls" + ) + + +class LegacyConversationalEvalInput(BaseModel): + """Complete conversational eval input schema. 
+ + conversationHistory: Array of exchanges, where each exchange is + [userMessage, ...agentMessages[]] + currentUserPrompt: The current user message to evaluate + """ + + conversation_history: list[ + list[ + LegacyConversationalEvalUserMessage + | LegacyConversationalEvalInputAgentMessage + ] + ] = Field(alias="conversationHistory") + current_user_prompt: LegacyConversationalEvalUserMessage = Field( + alias="currentUserPrompt" + ) + + +class LegacyConversationalEvalOutput(BaseModel): + """Complete eval output schema matching TypeScript definition. + + agentResponse: Sequence of agent messages ending with a message without tool calls + """ + + agent_response: list[LegacyConversationalEvalOutputAgentMessage] = Field( + alias="agentResponse" + ) + + +# Mapper functions to convert between UiPath standard Message format and legacy conversational formats + + +class UiPathLegacyEvalChatMessagesMapper: + @staticmethod + def legacy_conversational_eval_input_to_uipath_message_list( + eval_input: LegacyConversationalEvalInput, + ) -> List[UiPathConversationMessage]: + """Convert legacy eval input format to list of UiPathConversationMessage.""" + messages: List[UiPathConversationMessage] = [] + timestamp = ( + datetime.now(timezone.utc) + .isoformat(timespec="milliseconds") + .replace("+00:00", "Z") + ) + + # Process conversation history (list of exchanges) + for eval_exchange in eval_input.conversation_history: + for eval_message in eval_exchange: + if eval_message.role == "user": + # Convert user message + content_parts = ( + [ + UiPathConversationContentPart( + content_part_id=str(uuid.uuid4()), + mime_type="text/plain", + data=UiPathInlineValue(inline=eval_message.text), + citations=[], + created_at=timestamp, + updated_at=timestamp, + ) + ] + if eval_message.text + else [] + ) + + # TODO: Add attachments if present + # if message.attachments: + # for attachment in message.attachments: + # content_parts.append( + # UiPathConversationContentPart(...) 
+ # ) + + messages.append( + UiPathConversationMessage( + message_id=str(uuid.uuid4()), + role="user", + content_parts=content_parts, + tool_calls=[], + interrupts=[], + created_at=timestamp, + updated_at=timestamp, + ) + ) + elif eval_message.role == "agent": + # Convert agent message + content_parts = ( + [ + UiPathConversationContentPart( + content_part_id=str(uuid.uuid4()), + mime_type="text/markdown", + data=UiPathInlineValue(inline=eval_message.text), + citations=[], + created_at=timestamp, + updated_at=timestamp, + ) + ] + if eval_message.text + else [] + ) + + # Convert tool calls if present + tool_calls: List[UiPathConversationToolCall] = [] + if eval_message.tool_calls: + for tc in eval_message.tool_calls: + tool_call = UiPathConversationToolCall( + tool_call_id=str(uuid.uuid4()), + name=tc.name, + input=tc.arguments, + timestamp=timestamp, + result=UiPathConversationToolCallResult( + timestamp=timestamp, + output=tc.result.value, + is_error=tc.result.is_error, + ), + created_at=timestamp, + updated_at=timestamp, + ) + tool_calls.append(tool_call) + + messages.append( + UiPathConversationMessage( + message_id=str(uuid.uuid4()), + role="assistant", + content_parts=content_parts, + tool_calls=tool_calls, + interrupts=[], + created_at=timestamp, + updated_at=timestamp, + ) + ) + + # Add current user prompt + content_parts = ( + [ + UiPathConversationContentPart( + content_part_id=str(uuid.uuid4()), + mime_type="text/plain", + data=UiPathInlineValue(inline=eval_input.current_user_prompt.text), + citations=[], + created_at=timestamp, + updated_at=timestamp, + ) + ] + if eval_input.current_user_prompt.text + else [] + ) + + # TODO Add attachments if present + # if eval_input.current_user_prompt.attachments: + # for attachment in eval_input.current_user_prompt.attachments: + # content_parts.append( + # UiPathConversationContentPart(...) 
+ # ) + + messages.append( + UiPathConversationMessage( + message_id=str(uuid.uuid4()), + role="user", + content_parts=content_parts, + tool_calls=[], + interrupts=[], + created_at=timestamp, + updated_at=timestamp, + ) + ) + + return messages + + @staticmethod + def legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output: LegacyConversationalEvalOutput, + ) -> List[UiPathConversationMessageData]: + """Convert legacy eval output format to list of UiPathConversationMessageData.""" + messages: List[UiPathConversationMessageData] = [] + + for eval_agent_message in eval_output.agent_response: + content_parts = ( + [ + UiPathConversationContentPartData( + mime_type="text/markdown", + data=UiPathInlineValue(inline=eval_agent_message.text), + citations=[], + ) + ] + if eval_agent_message.text + else [] + ) + + tool_calls: List[UiPathConversationToolCallData] = [] + if eval_agent_message.tool_calls: + for tc in eval_agent_message.tool_calls: + tool_call = UiPathConversationToolCallData( + name=tc.name, + input=tc.arguments, + ) + tool_calls.append(tool_call) + + messages.append( + UiPathConversationMessageData( + role="assistant", + content_parts=content_parts, + tool_calls=tool_calls, + interrupts=[], + ) + ) + + return messages diff --git a/src/uipath/_cli/_evals/_models/_evaluation_set.py b/src/uipath/_cli/_evals/_models/_evaluation_set.py index bf1a08d89..258cfcf79 100644 --- a/src/uipath/_cli/_evals/_models/_evaluation_set.py +++ b/src/uipath/_cli/_evals/_models/_evaluation_set.py @@ -4,6 +4,10 @@ from pydantic import BaseModel, ConfigDict, Field from pydantic.alias_generators import to_camel +from uipath._cli._evals._conversational_utils import ( + LegacyConversationalEvalInput, + LegacyConversationalEvalOutput, +) from uipath._cli._evals.mocks.types import ( InputMockingStrategy, MockingStrategy, @@ -115,6 +119,12 @@ class LegacyEvaluationItem(BaseModel): tools_to_simulate: list[ToolSimulation] = Field( default_factory=list, alias="toolsToSimulate" ) + conversational_inputs: LegacyConversationalEvalInput | None = Field( + default=None, alias="conversationalInputs" + ) + conversational_expected_output: LegacyConversationalEvalOutput | None = Field( + default=None, alias="conversationalExpectedOutput" + ) class EvaluationSet(BaseModel): diff --git a/src/uipath/_cli/_utils/_eval_set.py b/src/uipath/_cli/_utils/_eval_set.py index bbc9d5047..4155b9b01 100644 --- a/src/uipath/_cli/_utils/_eval_set.py +++ b/src/uipath/_cli/_utils/_eval_set.py @@ -5,6 +5,7 @@ import click from pydantic import ValidationError +from uipath._cli._evals._conversational_utils import UiPathLegacyEvalChatMessagesMapper from uipath._cli._evals._evaluator_factory import EvaluatorFactory from uipath._cli._evals._models._evaluation_set import ( EvaluationItem, @@ -141,6 +142,27 @@ def migrate_evaluation_item( prompt=evaluation.simulation_instructions or "", tools_to_simulate=evaluation.tools_to_simulate or [], ) + + if evaluation.conversational_inputs: + conversational_messages_input = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + evaluation.conversational_inputs + ) + evaluation.inputs["messages"] = [ + message.model_dump(by_alias=True) + for message in conversational_messages_input + ] + + if evaluation.conversational_expected_output: + conversational_messages_expected_output = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + evaluation.conversational_expected_output + ) + evaluation.expected_output[ + 
"uipath__agent_response_messages" + ] = [ + message.model_dump(by_alias=True) + for message in conversational_messages_expected_output + ] + return EvaluationItem.model_validate( { "id": evaluation.id, diff --git a/tests/cli/eval/test_conversational_utils.py b/tests/cli/eval/test_conversational_utils.py new file mode 100644 index 000000000..953fce94f --- /dev/null +++ b/tests/cli/eval/test_conversational_utils.py @@ -0,0 +1,535 @@ +"""Tests for conversational eval utilities.""" + +from uipath.core.chat import UiPathInlineValue + +from uipath._cli._evals._conversational_utils import ( + LegacyConversationalEvalInput, + LegacyConversationalEvalInputAgentMessage, + LegacyConversationalEvalInputToolCall, + LegacyConversationalEvalInputToolCallResult, + LegacyConversationalEvalOutput, + LegacyConversationalEvalOutputAgentMessage, + LegacyConversationalEvalOutputToolCall, + LegacyConversationalEvalUserMessage, + UiPathLegacyEvalChatMessagesMapper, +) + + +class TestLegacyConversationalEvalInputToUiPathMessages: + """Tests for converting legacy eval input to UiPath messages.""" + + def test_converts_simple_conversation(self): + """Should convert simple user-agent conversation.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Hello"), + LegacyConversationalEvalInputAgentMessage(text="Hi there!"), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="How are you?"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + # Should have 3 messages: user, agent, user + assert len(result) == 3 + assert result[0].role == "user" + assert isinstance(result[0].content_parts[0].data, UiPathInlineValue) + assert result[0].content_parts[0].data.inline == "Hello" + assert result[1].role == "assistant" + assert isinstance(result[1].content_parts[0].data, UiPathInlineValue) + assert result[1].content_parts[0].data.inline == "Hi there!" + assert result[2].role == "user" + assert isinstance(result[2].content_parts[0].data, UiPathInlineValue) + assert result[2].content_parts[0].data.inline == "How are you?" 
+ + def test_converts_user_message_with_text_plain_mime_type(self): + """User messages should have text/plain mime type.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Test"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + assert len(result) == 1 + assert result[0].content_parts[0].mime_type == "text/plain" + + def test_converts_agent_message_with_text_markdown_mime_type(self): + """Agent messages should have text/markdown mime type.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Question"), + LegacyConversationalEvalInputAgentMessage(text="**Answer**"), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Next"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + # Agent message is at index 1 + assert result[1].content_parts[0].mime_type == "text/markdown" + + def test_converts_agent_message_with_tool_calls(self): + """Should convert agent messages with tool calls and results.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Search for data"), + LegacyConversationalEvalInputAgentMessage( + text="Let me search", + toolCalls=[ + LegacyConversationalEvalInputToolCall( + name="search_tool", + arguments={"query": "test"}, + result=LegacyConversationalEvalInputToolCallResult( + value={"results": ["item1", "item2"]}, + isError=False, + ), + ) + ], + ), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Thanks"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + agent_message = result[1] + assert agent_message.role == "assistant" + assert len(agent_message.tool_calls) == 1 + assert agent_message.tool_calls[0].name == "search_tool" + assert agent_message.tool_calls[0].input == {"query": "test"} + assert agent_message.tool_calls[0].result is not None + assert agent_message.tool_calls[0].result.output == { + "results": ["item1", "item2"] + } + assert agent_message.tool_calls[0].result.is_error is False + + def test_converts_tool_call_with_error_result(self): + """Should handle tool calls with error results.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Do something"), + LegacyConversationalEvalInputAgentMessage( + text="Trying", + toolCalls=[ + LegacyConversationalEvalInputToolCall( + name="failing_tool", + arguments={}, + result=LegacyConversationalEvalInputToolCallResult( + value="Error occurred", + isError=True, + ), + ) + ], + ), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Ok"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + tool_call = result[1].tool_calls[0] + assert tool_call.result is not None + assert tool_call.result.is_error is True + assert tool_call.result.output == "Error occurred" + + def test_converts_multiple_exchanges(self): + """Should handle multiple conversation exchanges.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="First question"), + LegacyConversationalEvalInputAgentMessage(text="First answer"), + ], + [ + 
LegacyConversationalEvalUserMessage(text="Second question"), + LegacyConversationalEvalInputAgentMessage(text="Second answer"), + ], + ], + currentUserPrompt=LegacyConversationalEvalUserMessage( + text="Third question" + ), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + assert len(result) == 5 # 2 exchanges (4 messages) + current prompt + assert isinstance(result[0].content_parts[0].data, UiPathInlineValue) + assert result[0].content_parts[0].data.inline == "First question" + assert isinstance(result[1].content_parts[0].data, UiPathInlineValue) + assert result[1].content_parts[0].data.inline == "First answer" + assert isinstance(result[2].content_parts[0].data, UiPathInlineValue) + assert result[2].content_parts[0].data.inline == "Second question" + assert isinstance(result[3].content_parts[0].data, UiPathInlineValue) + assert result[3].content_parts[0].data.inline == "Second answer" + assert isinstance(result[4].content_parts[0].data, UiPathInlineValue) + assert result[4].content_parts[0].data.inline == "Third question" + + def test_converts_exchange_with_multiple_agent_messages(self): + """Should handle exchanges with multiple agent responses.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Question"), + LegacyConversationalEvalInputAgentMessage( + text="Using tool", + toolCalls=[ + LegacyConversationalEvalInputToolCall( + name="tool1", + arguments={"x": 1}, + result=LegacyConversationalEvalInputToolCallResult( + value="result1", + isError=False, + ), + ) + ], + ), + LegacyConversationalEvalInputAgentMessage(text="Final answer"), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Next"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + assert len(result) == 4 # user, agent with tool, agent final, current user + assert result[0].role == "user" + assert result[1].role == "assistant" + assert len(result[1].tool_calls) == 1 + assert result[2].role == "assistant" + assert len(result[2].tool_calls) == 0 + assert result[3].role == "user" + + def test_generates_unique_ids_for_messages(self): + """Should generate unique message IDs.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Q1"), + LegacyConversationalEvalInputAgentMessage(text="A1"), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Q2"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + message_ids = [msg.message_id for msg in result] + assert len(message_ids) == len(set(message_ids)) # All unique + + def test_generates_unique_content_part_ids(self): + """Should generate unique content part IDs.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Q"), + LegacyConversationalEvalInputAgentMessage(text="A"), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Q2"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + content_part_ids = [ + part.content_part_id for msg in result for part in msg.content_parts + ] + assert len(content_part_ids) == len(set(content_part_ids)) + + def test_empty_conversation_history(self): + """Should handle empty conversation 
history.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="First message"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + assert len(result) == 1 + assert result[0].role == "user" + assert isinstance(result[0].content_parts[0].data, UiPathInlineValue) + assert result[0].content_parts[0].data.inline == "First message" + + def test_blank_text_in_message_creates_empty_content_parts(self): + """Should create empty content_parts when user message has blank text.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text=""), + LegacyConversationalEvalInputAgentMessage(text=""), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text=""), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + # Empty text content should result in no text content-parts + assert len(result[0].content_parts) == 0 + assert len(result[1].content_parts) == 0 + assert len(result[2].content_parts) == 0 + + def test_blank_text_with_tool_calls_creates_empty_content_parts(self): + """Should create empty content_parts when agent message with tool calls has blank text.""" + eval_input = LegacyConversationalEvalInput( + conversationHistory=[ + [ + LegacyConversationalEvalUserMessage(text="Search for data"), + LegacyConversationalEvalInputAgentMessage( + text="", + toolCalls=[ + LegacyConversationalEvalInputToolCall( + name="search_tool", + arguments={"query": "test"}, + result=LegacyConversationalEvalInputToolCallResult( + value={"results": ["item1"]}, + isError=False, + ), + ) + ], + ), + ] + ], + currentUserPrompt=LegacyConversationalEvalUserMessage(text="Thanks"), + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list( + eval_input + ) + + agent_message = result[1] + assert agent_message.role == "assistant" + # Empty content should result in no text content-parts + assert len(agent_message.content_parts) == 0 + # Tool calls should still be present + assert len(agent_message.tool_calls) == 1 + + +class TestLegacyConversationalEvalOutputToUiPathMessageData: + """Tests for converting legacy eval output to UiPath message data.""" + + def test_converts_simple_agent_response(self): + """Should convert simple agent response.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage(text="Here is the answer") + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + assert result[0].role == "assistant" + assert len(result[0].content_parts) == 1 + assert isinstance(result[0].content_parts[0].data, UiPathInlineValue) + assert result[0].content_parts[0].data.inline == "Here is the answer" + assert result[0].content_parts[0].mime_type == "text/markdown" + + def test_converts_agent_response_with_tool_calls(self): + """Should convert agent responses with tool calls.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage( + text="Using tool", + toolCalls=[ + LegacyConversationalEvalOutputToolCall( + name="search", + arguments={"query": "test"}, + ) + ], + ) + ] + ) + + result = 
UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 1 + assert result[0].tool_calls[0].name == "search" + assert result[0].tool_calls[0].input == {"query": "test"} + # Output tool calls should not have result field + assert result[0].tool_calls[0].result is None + + def test_converts_multiple_agent_messages(self): + """Should convert multiple agent messages in sequence.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage( + text="First response", + toolCalls=[ + LegacyConversationalEvalOutputToolCall( + name="tool1", + arguments={}, + ) + ], + ), + LegacyConversationalEvalOutputAgentMessage(text="Final response"), + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 2 + assert isinstance(result[0].content_parts[0].data, UiPathInlineValue) + assert result[0].content_parts[0].data.inline == "First response" + assert len(result[0].tool_calls) == 1 + assert isinstance(result[1].content_parts[0].data, UiPathInlineValue) + assert result[1].content_parts[0].data.inline == "Final response" + assert len(result[1].tool_calls) == 0 + + def test_converts_multiple_tool_calls_in_message(self): + """Should handle multiple tool calls in a single message.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage( + text="Using multiple tools", + toolCalls=[ + LegacyConversationalEvalOutputToolCall( + name="tool1", + arguments={"a": 1}, + ), + LegacyConversationalEvalOutputToolCall( + name="tool2", + arguments={"b": 2}, + ), + ], + ) + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 2 + assert result[0].tool_calls[0].name == "tool1" + assert result[0].tool_calls[0].input == {"a": 1} + assert result[0].tool_calls[1].name == "tool2" + assert result[0].tool_calls[1].input == {"b": 2} + + def test_agent_message_without_tool_calls(self): + """Should handle agent messages without tool calls.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage(text="Simple response") + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + assert len(result[0].tool_calls) == 0 + + def test_empty_agent_response(self): + """Should handle empty agent response list.""" + eval_output = LegacyConversationalEvalOutput(agentResponse=[]) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert result == [] + + def test_preserves_empty_tool_arguments(self): + """Should preserve empty tool arguments dict.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage( + text="Using tool", + toolCalls=[ + LegacyConversationalEvalOutputToolCall( + name="no_arg_tool", + arguments={}, + ) + ], + ) + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert result[0].tool_calls[0].input == {} + + def 
test_blank_text_in_agent_response_creates_empty_content_parts(self): + """Should create empty content_parts when agent response has blank text.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[LegacyConversationalEvalOutputAgentMessage(text="")] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + assert result[0].role == "assistant" + # Empty text should result in no text content-parts + assert len(result[0].content_parts) == 0 + + def test_blank_text_with_tool_calls_in_agent_response_creates_empty_content_parts( + self, + ): + """Should create empty content_parts when agent response with tool calls has blank text.""" + eval_output = LegacyConversationalEvalOutput( + agentResponse=[ + LegacyConversationalEvalOutputAgentMessage( + text="", + toolCalls=[ + LegacyConversationalEvalOutputToolCall( + name="search", + arguments={"query": "test"}, + ) + ], + ) + ] + ) + + result = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_output_to_uipath_message_data_list( + eval_output + ) + + assert len(result) == 1 + # Empty text should result in no text content-parts + assert len(result[0].content_parts) == 0 + # Tool calls should still be present + assert len(result[0].tool_calls) == 1 diff --git a/uv.lock b/uv.lock index c6fa2bc97..d1920ee55 100644 --- a/uv.lock +++ b/uv.lock @@ -2531,7 +2531,7 @@ wheels = [ [[package]] name = "uipath" -version = "2.8.44" +version = "2.8.45" source = { editable = "." } dependencies = [ { name = "applicationinsights" },
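Usage sketch (illustrative only, not part of the diff above): the class and method names come from src/uipath/_cli/_evals/_conversational_utils.py as added in this change, the camelCase keys match the legacy eval schema aliases, and the concrete conversation values are hypothetical.

    from uipath._cli._evals._conversational_utils import (
        LegacyConversationalEvalInput,
        UiPathLegacyEvalChatMessagesMapper,
    )

    # Legacy conversational eval JSON uses camelCase keys, so validate via the aliases.
    legacy_input = LegacyConversationalEvalInput.model_validate(
        {
            "conversationHistory": [
                [
                    {"role": "user", "text": "Hello"},
                    {"role": "agent", "text": "Hi! How can I help?"},
                ]
            ],
            "currentUserPrompt": {"role": "user", "text": "Summarize my open tickets"},
        }
    )

    # Convert to UiPath conversation messages, then serialize the same way
    # migrate_evaluation_item does when populating inputs["messages"].
    messages = UiPathLegacyEvalChatMessagesMapper.legacy_conversational_eval_input_to_uipath_message_list(
        legacy_input
    )
    messages_payload = [m.model_dump(by_alias=True) for m in messages]

The expected-output side follows the same pattern: conversationalExpectedOutput is validated into LegacyConversationalEvalOutput and mapped with legacy_conversational_eval_output_to_uipath_message_data_list before being stored under expected_output["uipath__agent_response_messages"].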