diff --git a/tests/agent/react/test_json_utils.py b/tests/agent/react/test_json_utils.py
new file mode 100644
index 000000000..43c2f32ee
--- /dev/null
+++ b/tests/agent/react/test_json_utils.py
@@ -0,0 +1,217 @@
+"""Tests for json_utils.py — JSONPath extraction from Pydantic models."""
+
+from typing import Optional
+
+import pytest
+from pydantic import BaseModel
+
+from uipath_langchain.agent.react.json_utils import (
+    _create_type_matcher,
+    _is_pydantic_model,
+    _unwrap_optional,
+    extract_values_by_paths,
+    get_json_paths_by_type,
+)
+
+# --- Test models ---
+
+
+class Attachment(BaseModel):
+    id: str
+    filename: str
+
+
+class NestedContainer(BaseModel):
+    attachment: Attachment
+    label: str
+
+
+class ModelWithSimpleField(BaseModel):
+    attachment: Attachment
+    name: str
+
+
+class ModelWithArrayField(BaseModel):
+    attachments: list[Attachment]
+    count: int
+
+
+class ModelWithOptionalField(BaseModel):
+    attachment: Optional[Attachment] = None
+    name: str
+
+
+class ModelWithNestedModel(BaseModel):
+    container: NestedContainer
+    title: str
+
+
+class ModelWithArrayOfNested(BaseModel):
+    containers: list[NestedContainer]
+
+
+class ModelWithNoTargetType(BaseModel):
+    name: str
+    value: int
+
+
+class ModelWithMixedFields(BaseModel):
+    single: Attachment
+    multiple: list[Attachment]
+    label: str
+
+
+# --- Tests for get_json_paths_by_type ---
+
+
+class TestGetJsonPathsByType:
+    """Tests for get_json_paths_by_type."""
+
+    def test_simple_field(self) -> None:
+        paths = get_json_paths_by_type(ModelWithSimpleField, "Attachment")
+        assert paths == ["$.attachment"]
+
+    def test_array_field(self) -> None:
+        paths = get_json_paths_by_type(ModelWithArrayField, "Attachment")
+        assert paths == ["$.attachments[*]"]
+
+    def test_optional_field_unwrapped(self) -> None:
+        paths = get_json_paths_by_type(ModelWithOptionalField, "Attachment")
+        assert paths == ["$.attachment"]
+
+    def test_nested_model_field(self) -> None:
+        paths = get_json_paths_by_type(ModelWithNestedModel, "Attachment")
+        assert paths == ["$.container.attachment"]
+
+    def test_array_of_nested_models(self) -> None:
+        paths = get_json_paths_by_type(ModelWithArrayOfNested, "Attachment")
+        assert paths == ["$.containers[*].attachment"]
+
+    def test_no_matching_type_returns_empty(self) -> None:
+        paths = get_json_paths_by_type(ModelWithNoTargetType, "Attachment")
+        assert paths == []
+
+    def test_mixed_simple_and_array(self) -> None:
+        paths = get_json_paths_by_type(ModelWithMixedFields, "Attachment")
+        assert "$.single" in paths
+        assert "$.multiple[*]" in paths
+        assert len(paths) == 2
+
+
+# --- Tests for extract_values_by_paths ---
+
+
+class TestExtractValuesByPaths:
+    """Tests for extract_values_by_paths."""
+
+    def test_extract_from_dict_simple_path(self) -> None:
+        obj = {"attachment": {"id": "123", "filename": "doc.pdf"}, "name": "test"}
+        result = extract_values_by_paths(obj, ["$.attachment"])
+        assert result == [{"id": "123", "filename": "doc.pdf"}]
+
+    def test_extract_from_dict_array_path(self) -> None:
+        obj = {
+            "attachments": [
+                {"id": "1", "filename": "a.pdf"},
+                {"id": "2", "filename": "b.pdf"},
+            ],
+            "count": 2,
+        }
+        result = extract_values_by_paths(obj, ["$.attachments[*]"])
+        assert len(result) == 2
+        assert result[0]["id"] == "1"
+        assert result[1]["id"] == "2"
+
+    def test_extract_from_basemodel(self) -> None:
+        obj = ModelWithSimpleField(
+            attachment=Attachment(id="456", filename="img.png"),
+            name="test",
+        )
+        result = extract_values_by_paths(obj, ["$.attachment"])
+        assert len(result) == 1
+        assert result[0]["id"] == "456"
+
+    def test_extract_multiple_paths(self) -> None:
+        obj = {
+            "single": {"id": "s1", "filename": "s.pdf"},
+            "multiple": [
+                {"id": "m1", "filename": "m1.pdf"},
+                {"id": "m2", "filename": "m2.pdf"},
+            ],
+            "label": "test",
+        }
+        result = extract_values_by_paths(obj, ["$.single", "$.multiple[*]"])
+        assert len(result) == 3
+
+    def test_extract_no_match_returns_empty(self) -> None:
+        obj = {"name": "test"}
+        result = extract_values_by_paths(obj, ["$.nonexistent"])
+        assert result == []
+
+    def test_extract_empty_paths_returns_empty(self) -> None:
+        obj = {"name": "test"}
+        result = extract_values_by_paths(obj, [])
+        assert result == []
+
+    def test_extract_nested_path(self) -> None:
+        obj = {
+            "container": {
+                "attachment": {"id": "nested", "filename": "n.pdf"},
+                "label": "c",
+            },
+            "title": "t",
+        }
+        result = extract_values_by_paths(obj, ["$.container.attachment"])
+        assert len(result) == 1
+        assert result[0]["id"] == "nested"
+
+
+# --- Tests for helper functions ---
+
+
+class TestUnwrapOptional:
+    """Tests for _unwrap_optional."""
+
+    @pytest.mark.parametrize(
+        "input_type,expected",
+        [
+            (Optional[str], str),
+            (str, str),
+            (Optional[Attachment], Attachment),
+        ],
+        ids=["optional-str", "plain-str", "optional-basemodel"],
+    )
+    def test_unwraps_correctly(self, input_type: type, expected: type) -> None:
+        assert _unwrap_optional(input_type) is expected
+
+
+class TestIsPydanticModel:
+    """Tests for _is_pydantic_model."""
+
+    @pytest.mark.parametrize(
+        "value,expected",
+        [
+            (Attachment, True),
+            (str, False),
+            (None, False),
+            (Attachment(id="1", filename="f"), False),
+        ],
+        ids=["basemodel-class", "builtin-type", "none", "instance"],
+    )
+    def test_identifies_pydantic_models(self, value: object, expected: bool) -> None:
+        assert _is_pydantic_model(value) is expected
+
+
+class TestCreateTypeMatcher:
+    """Tests for _create_type_matcher."""
+
+    def test_matches_by_class_and_string_annotation(self) -> None:
+        matcher = _create_type_matcher("Attachment", None)
+        assert matcher(Attachment) is True
+        assert matcher("Attachment") is True
+        assert matcher(str) is False
+        assert matcher("OtherType") is False
+
+    def test_matches_by_target_type(self) -> None:
+        matcher = _create_type_matcher("Attachment", Attachment)
+        assert matcher(Attachment) is True
diff --git a/tests/agent/react/test_merge_objects.py b/tests/agent/react/test_merge_objects.py
index a112cec85..e7f6e40da 100644
--- a/tests/agent/react/test_merge_objects.py
+++ b/tests/agent/react/test_merge_objects.py
@@ -3,6 +3,7 @@
 import pytest
 from pydantic import BaseModel
 
+from uipath_langchain.agent.react.reducers import merge_dicts as reducer_merge_dicts
 from uipath_langchain.agent.react.reducers import merge_objects
 
 
@@ -184,3 +185,35 @@ def test_invalid_field_names_ignored(self):
         assert result.name == "updated"
         # Invalid field should not exist
         assert not hasattr(result, "invalid_field")
+
+
+class TestMergeDicts:
+    """Test merge_dicts reducer from reducers.py."""
+
+    def test_empty_right_returns_left(self):
+        left = {"a": 1, "b": 2}
+        result = reducer_merge_dicts(left, {})
+        assert result is left
+
+    def test_empty_left_returns_right(self):
+        right = {"a": 1}
+        result = reducer_merge_dicts({}, right)
+        assert result is right
+
+    def test_disjoint_keys_merged(self):
+        left = {"a": 1}
+        right = {"b": 2}
+        result = reducer_merge_dicts(left, right)
+        assert result == {"a": 1, "b": 2}
+
+    def test_overlapping_keys_right_wins(self):
+        left = {"a": 1, "b": 2}
+        right = {"b": 3, "c": 4}
+        result = reducer_merge_dicts(left, right)
+        assert result == {"a": 1, "b": 3, "c": 4}
+
+    def test_none_values_in_right_override(self):
+        left = {"a": 1}
+        right = {"a": None}
+        result = reducer_merge_dicts(left, right)
+        assert result == {"a": None}
diff --git a/tests/agent/react/test_router.py b/tests/agent/react/test_router.py
index 8cbf83691..9201a9cf0 100644
--- a/tests/agent/react/test_router.py
+++ b/tests/agent/react/test_router.py
@@ -278,3 +278,91 @@ def test_empty_ai_response_raises_exception(self, route_function_no_limit):
             match="Agent produced empty response without tool calls",
         ):
             route_function_no_limit(state)
+
+
+class TestRouteAgentMultipleToolCallSequencing:
+    """Test sequential dispatching of multiple tool calls in a single AI message."""
+
+    def test_three_tools_dispatched_sequentially(self):
+        """Router dispatches each tool in order when multiple tool calls exist."""
+        route_func = create_route_agent(thinking_messages_limit=0)
+
+        ai_message = AIMessage(
+            content="Using three tools",
+            tool_calls=[
+                {"name": "tool_a", "args": {}, "id": "call_a"},
+                {"name": "tool_b", "args": {}, "id": "call_b"},
+                {"name": "tool_c", "args": {}, "id": "call_c"},
+            ],
+        )
+
+        # Step 1: No tool results yet — route to first tool
+        state_0 = MockAgentGraphState(
+            messages=[HumanMessage(content="query"), ai_message]
+        )
+        assert route_func(state_0) == "tool_a"
+
+        # Step 2: First tool done — route to second
+        state_1 = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="query"),
+                ai_message,
+                ToolMessage(content="result_a", tool_call_id="call_a"),
+            ]
+        )
+        assert route_func(state_1) == "tool_b"
+
+        # Step 3: Two tools done — route to third
+        state_2 = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="query"),
+                ai_message,
+                ToolMessage(content="result_a", tool_call_id="call_a"),
+                ToolMessage(content="result_b", tool_call_id="call_b"),
+            ]
+        )
+        assert route_func(state_2) == "tool_c"
+
+        # Step 4: All done — route back to agent
+        state_3 = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="query"),
+                ai_message,
+                ToolMessage(content="result_a", tool_call_id="call_a"),
+                ToolMessage(content="result_b", tool_call_id="call_b"),
+                ToolMessage(content="result_c", tool_call_id="call_c"),
+            ]
+        )
+        assert route_func(state_3) == AgentGraphNode.AGENT
+
+    def test_flow_control_tool_among_multiple_terminates(self):
+        """Router should terminate when the next tool is a flow control tool."""
+        route_func = create_route_agent(thinking_messages_limit=0)
+
+        ai_message = AIMessage(
+            content="Using tools then ending",
+            tool_calls=[
+                {"name": "regular_tool", "args": {}, "id": "call_1"},
+                {
+                    "name": END_EXECUTION_TOOL.name,
+                    "args": {"reason": "done"},
+                    "id": "call_2",
+                },
+            ],
+        )
+
+        # First tool is regular
+        state_0 = MockAgentGraphState(
+            messages=[HumanMessage(content="query"), ai_message]
+        )
+        assert route_func(state_0) == "regular_tool"
+
+        # After first tool done, next is flow control — terminate
+        state_1 = MockAgentGraphState(
+            messages=[
+                HumanMessage(content="query"),
+                ai_message,
+                ToolMessage(content="done", tool_call_id="call_1"),
+            ]
+        )
+        assert route_func(state_1) == AgentGraphNode.TERMINATE
diff --git a/tests/agent/tools/test_mcp_tool.py b/tests/agent/tools/test_mcp_tool.py
new file mode 100644
index 000000000..bc7dce49e
--- /dev/null
+++ b/tests/agent/tools/test_mcp_tool.py
@@ -0,0 +1,221 @@
+"""Tests for mcp_tool.py — deduplication, filtering, and tool creation."""
+
+from unittest.mock import MagicMock
+
+import pytest
+from langchain_core.tools import BaseTool
+from uipath.agent.models.agent import (
+    AgentMcpResourceConfig,
+    AgentMcpTool,
+    AgentResourceType,
+)
+
+from uipath_langchain.agent.tools.mcp.mcp_tool import (
+    _deduplicate_tools,
+    _filter_tools,
+    create_mcp_tools,
+    create_mcp_tools_from_metadata_for_mcp_server,
+)
+
+
+def _make_tool(name: str) -> BaseTool:
+    """Create a mock BaseTool with the given name."""
+    tool = MagicMock(spec=BaseTool)
+    tool.name = name
+    return tool
+
+
+def _make_mcp_resource(
+    available_tools: list[AgentMcpTool] | None = None,
+    is_enabled: bool = True,
+) -> AgentMcpResourceConfig:
+    """Create an AgentMcpResourceConfig for testing."""
+    if available_tools is None:
+        available_tools = [
+            AgentMcpTool(
+                name="tool_a",
+                description="Tool A",
+                inputSchema={"type": "object", "properties": {}},
+            ),
+        ]
+    return AgentMcpResourceConfig(
+        name="test-mcp",
+        description="Test MCP server",
+        **{"$resourceType": AgentResourceType.MCP},
+        folderPath="/Shared",
+        slug="test-slug",
+        availableTools=available_tools,
+        isEnabled=is_enabled,
+    )
+
+
+class TestDeduplicateTools:
+    """Tests for _deduplicate_tools."""
+
+    def test_unique_names_unchanged(self) -> None:
+        tools = [_make_tool("alpha"), _make_tool("beta"), _make_tool("gamma")]
+        result = _deduplicate_tools(tools)
+        assert [t.name for t in result] == ["alpha", "beta", "gamma"]
+
+    def test_duplicate_names_get_numeric_suffix(self) -> None:
+        tools = [_make_tool("search"), _make_tool("calc"), _make_tool("search")]
+        result = _deduplicate_tools(tools)
+        assert [t.name for t in result] == ["search_1", "calc", "search_2"]
+
+    def test_empty_list(self) -> None:
+        assert _deduplicate_tools([]) == []
+
+
+class TestFilterTools:
+    """Tests for _filter_tools."""
+
+    def test_filter_keeps_matching_tools(self) -> None:
+        tools = [_make_tool("a"), _make_tool("b"), _make_tool("c")]
+        cfg = _make_mcp_resource(
+            available_tools=[
+                AgentMcpTool(
+                    name="a",
+                    description="A",
+                    inputSchema={"type": "object", "properties": {}},
+                ),
+                AgentMcpTool(
+                    name="c",
+                    description="C",
+                    inputSchema={"type": "object", "properties": {}},
+                ),
+            ]
+        )
+        result = _filter_tools(tools, cfg)
+        assert [t.name for t in result] == ["a", "c"]
+
+    def test_filter_removes_all_when_none_match(self) -> None:
+        tools = [_make_tool("x"), _make_tool("y")]
+        cfg = _make_mcp_resource(
+            available_tools=[
+                AgentMcpTool(
+                    name="z",
+                    description="Z",
+                    inputSchema={"type": "object", "properties": {}},
+                ),
+            ]
+        )
+        result = _filter_tools(tools, cfg)
+        assert result == []
+
+    def test_filter_empty_tools_list(self) -> None:
+        cfg = _make_mcp_resource()
+        result = _filter_tools([], cfg)
+        assert result == []
+
+
+class TestCreateMcpTools:
+    """Tests for create_mcp_tools async context manager."""
+
+    @pytest.mark.asyncio
+    async def test_missing_uipath_url_raises(
+        self, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        monkeypatch.delenv("UIPATH_URL", raising=False)
+        monkeypatch.delenv("UIPATH_ACCESS_TOKEN", raising=False)
+
+        cfg = _make_mcp_resource()
+        with pytest.raises(ValueError, match="UIPATH_URL"):
+            async with create_mcp_tools(cfg):
+                pass
+
+    @pytest.mark.asyncio
+    async def test_missing_access_token_raises(
+        self, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        monkeypatch.setenv("UIPATH_URL", "https://example.com")
+        monkeypatch.delenv("UIPATH_ACCESS_TOKEN", raising=False)
+
+        cfg = _make_mcp_resource()
+        with pytest.raises(ValueError, match="UIPATH_ACCESS_TOKEN"):
+            async with create_mcp_tools(cfg):
+                pass
+
+    @pytest.mark.asyncio
+    async def test_disabled_configs_yield_empty(
+        self, monkeypatch: pytest.MonkeyPatch
+    ) -> None:
+        monkeypatch.setenv("UIPATH_URL", "https://example.com")
+        monkeypatch.setenv("UIPATH_ACCESS_TOKEN", "test-token")
+
+        # Both single config and list of configs should yield empty
+        cfg = _make_mcp_resource(is_enabled=False)
+        async with create_mcp_tools(cfg) as tools:
+            assert tools == []
+
+        configs = [
+            _make_mcp_resource(is_enabled=False),
+            _make_mcp_resource(is_enabled=False),
+        ]
+        async with create_mcp_tools(configs) as tools:
+            assert tools == []
+
+
+class TestCreateMcpToolsFromMetadata:
+    """Tests for create_mcp_tools_from_metadata_for_mcp_server."""
+
+    @pytest.mark.asyncio
+    async def test_creates_tools_with_correct_metadata(self) -> None:
+        mcp_tools = [
+            AgentMcpTool(
+                name="get_weather",
+                description="Get weather data",
+                inputSchema={
+                    "type": "object",
+                    "properties": {"city": {"type": "string"}},
+                },
+            ),
+            AgentMcpTool(
+                name="search_docs",
+                description="Search documents",
+                inputSchema={
+                    "type": "object",
+                    "properties": {"query": {"type": "string"}},
+                },
+            ),
+        ]
+        cfg = _make_mcp_resource(available_tools=mcp_tools)
+        mock_client = MagicMock()
+
+        tools = await create_mcp_tools_from_metadata_for_mcp_server(cfg, mock_client)
+
+        assert len(tools) == 2
+        assert tools[0].name == "get_weather"
+        assert tools[0].description == "Get weather data"
+        assert tools[1].name == "search_docs"
+        # Validate metadata on first tool
+        assert tools[0].metadata is not None
+        assert tools[0].metadata["tool_type"] == "mcp"
+        assert tools[0].metadata["display_name"] == "get_weather"
+        assert tools[0].metadata["folder_path"] == "/Shared"
+        assert tools[0].metadata["slug"] == "test-slug"
+
+    @pytest.mark.asyncio
+    async def test_empty_available_tools_returns_empty(self) -> None:
+        cfg = _make_mcp_resource(available_tools=[])
+        mock_client = MagicMock()
+
+        tools = await create_mcp_tools_from_metadata_for_mcp_server(cfg, mock_client)
+
+        assert tools == []
+
+    @pytest.mark.asyncio
+    async def test_tool_name_sanitized(self) -> None:
+        mcp_tools = [
+            AgentMcpTool(
+                name="my tool with spaces!",
+                description="A tool",
+                inputSchema={"type": "object", "properties": {}},
+            ),
+        ]
+        cfg = _make_mcp_resource(available_tools=mcp_tools)
+        mock_client = MagicMock()
+
+        tools = await create_mcp_tools_from_metadata_for_mcp_server(cfg, mock_client)
+
+        assert " " not in tools[0].name
+        assert "!" not in tools[0].name
diff --git a/tests/agent/tools/test_process_tool.py b/tests/agent/tools/test_process_tool.py
index fd786b9e7..5bb656705 100644
--- a/tests/agent/tools/test_process_tool.py
+++ b/tests/agent/tools/test_process_tool.py
@@ -1,4 +1,4 @@
-"""Tests for process_tool.py metadata."""
+"""Tests for process_tool.py — metadata and tool creation."""
 
 import pytest
 from uipath.agent.models.agent import (
@@ -10,45 +10,103 @@
 from uipath_langchain.agent.tools.process_tool import create_process_tool
 
 
-class TestProcessToolMetadata:
-    """Test that process tool has correct metadata for observability."""
+@pytest.fixture
+def process_resource() -> AgentProcessToolResourceConfig:
+    """Create a minimal process tool resource config."""
+    return AgentProcessToolResourceConfig(
+        type=AgentToolType.PROCESS,
+        name="test_process",
+        description="Test process description",
+        input_schema={"type": "object", "properties": {}},
+        output_schema={"type": "object", "properties": {}},
+        properties=AgentProcessToolProperties(
+            process_name="MyProcess",
+            folder_path="/Shared/MyFolder",
+        ),
+    )
 
-    @pytest.fixture
-    def process_resource(self):
-        """Create a minimal process tool resource config."""
-        return AgentProcessToolResourceConfig(
-            type=AgentToolType.PROCESS,
-            name="test_process",
-            description="Test process description",
-            input_schema={"type": "object", "properties": {}},
-            output_schema={"type": "object", "properties": {}},
-            properties=AgentProcessToolProperties(
-                process_name="MyProcess",
-                folder_path="/Shared/MyFolder",
-            ),
-        )
 
-    def test_process_tool_has_metadata(self, process_resource):
-        """Test that process tool has metadata dict."""
-        tool = create_process_tool(process_resource)
+@pytest.fixture
+def process_resource_with_input() -> AgentProcessToolResourceConfig:
+    """Create a process tool resource with an input schema."""
+    return AgentProcessToolResourceConfig(
+        type=AgentToolType.PROCESS,
+        name="process_with_input",
+        description="Process with input schema",
+        input_schema={
+            "type": "object",
+            "properties": {
+                "name": {"type": "string"},
+                "count": {"type": "integer"},
+            },
+            "required": ["name"],
+        },
+        output_schema={
+            "type": "object",
+            "properties": {
+                "result": {"type": "string"},
+            },
+        },
+        properties=AgentProcessToolProperties(
+            process_name="InputProcess",
+            folder_path="/Shared/InputFolder",
+        ),
+    )
 
-        assert tool.metadata is not None
-        assert isinstance(tool.metadata, dict)
 
-    def test_process_tool_metadata_has_tool_type(self, process_resource):
-        """Test that metadata contains tool_type for span detection."""
+class TestProcessToolCreation:
+    """Test process tool creation, metadata, and structural properties."""
+
+    def test_tool_properties_and_metadata(
+        self, process_resource: AgentProcessToolResourceConfig
+    ) -> None:
         tool = create_process_tool(process_resource)
+
+        assert tool.name == "test_process"
+        assert tool.description == "Test process description"
+        assert tool.coroutine is not None
         assert tool.metadata is not None
         assert tool.metadata["tool_type"] == "process"
+        assert tool.metadata["display_name"] == "MyProcess"
+        assert tool.metadata["folder_path"] == "/Shared/MyFolder"
 
-    def test_process_tool_metadata_has_display_name(self, process_resource):
-        """Test that metadata contains display_name from process_name."""
-        tool = create_process_tool(process_resource)
+    def test_tool_name_sanitized_for_special_chars(self) -> None:
+        resource = AgentProcessToolResourceConfig(
+            type=AgentToolType.PROCESS,
+            name="my process (v2)!",
+            description="desc",
+            input_schema={"type": "object", "properties": {}},
+            output_schema={"type": "object", "properties": {}},
+            properties=AgentProcessToolProperties(
+                process_name="SanitizeMe",
+                folder_path="/Shared",
+            ),
+        )
+        tool = create_process_tool(resource)
+        assert " " not in tool.name
+        assert "(" not in tool.name
+        assert "!" not in tool.name
+
+    def test_tool_with_input_schema(
+        self, process_resource_with_input: AgentProcessToolResourceConfig
+    ) -> None:
+        tool = create_process_tool(process_resource_with_input)
+        assert tool.args_schema is not None
         assert tool.metadata is not None
-        assert tool.metadata["display_name"] == "MyProcess"
+        assert "args_schema" in tool.metadata
 
-    def test_process_tool_metadata_has_folder_path(self, process_resource):
-        """Test that metadata contains folder_path for span attributes."""
-        tool = create_process_tool(process_resource)
+    def test_none_folder_path_in_metadata(self) -> None:
+        resource = AgentProcessToolResourceConfig(
+            type=AgentToolType.PROCESS,
+            name="no_folder",
+            description="desc",
+            input_schema={"type": "object", "properties": {}},
+            output_schema={"type": "object", "properties": {}},
+            properties=AgentProcessToolProperties(
+                process_name="NoFolderProcess",
+                folder_path=None,
+            ),
+        )
+        tool = create_process_tool(resource)
         assert tool.metadata is not None
-        assert tool.metadata["folder_path"] == "/Shared/MyFolder"
+        assert tool.metadata["folder_path"] is None