29 changes: 26 additions & 3 deletions src/uipath_langchain/agent/tools/context_tool.py
@@ -77,6 +77,7 @@ class ContextOutputSchemaModel(BaseModel):
input_schema=input_model,
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn() -> dict[str, Any]:
return {"documents": await retriever.ainvoke(static_query_value)}
@@ -96,6 +97,7 @@ class ContextInputSchemaModel(BaseModel):
input_schema=input_model.model_json_schema(),
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn(query: str) -> dict[str, Any]:
return {"documents": await retriever.ainvoke(query)}
@@ -107,8 +109,15 @@ async def context_tool_fn(query: str) -> dict[str, Any]:
coroutine=context_tool_fn,
output_type=output_model,
metadata={
"tool_type": "context",
"tool_type": "context_grounding",
"display_name": resource.name,
"retrieval_mode": "SemanticSearch",
"number_of_results": resource.settings.result_count,
**(
{"static_query": static_query_value}
if is_static_query(resource)
else {}
),
},
)
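For illustration, with a hypothetical index resource named "Contracts" configured for five results and a static query (all values below are invented, not taken from this PR), the metadata assembled above would come out roughly as:

    # Hypothetical metadata for a static-query semantic-search context tool
    {
        "tool_type": "context_grounding",
        "display_name": "Contracts",
        "retrieval_mode": "SemanticSearch",
        "number_of_results": 5,
        "static_query": "termination clauses in vendor contracts",
    }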

@@ -145,6 +154,7 @@ def handle_deep_rag(
input_schema=input_model,
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn() -> dict[str, Any]:
# TODO: add glob pattern support
@@ -173,6 +183,7 @@ class DeepRagInputSchemaModel(BaseModel):
input_schema=input_model.model_json_schema(),
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn(query: str) -> dict[str, Any]:
# TODO: add glob pattern support
@@ -192,8 +203,11 @@ async def context_tool_fn(query: str) -> dict[str, Any]:
coroutine=context_tool_fn,
output_type=output_model,
metadata={
"tool_type": "context",
"tool_type": "context_grounding",
"display_name": resource.name,
"retrieval_mode": "DeepRag",
"citation_mode": citation_mode.value,
**({"static_query": static_prompt} if is_static_query(resource) else {}),
},
)
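The **({...} if ... else {}) pattern used in these metadata dicts conditionally merges a key only when a static query is configured; a minimal standalone illustration (not from this PR):

    # Conditional dict-unpacking: the key is only present when the condition holds
    base = {"tool_type": "context_grounding"}
    has_static_query = True
    metadata = {**base, **({"static_query": "q"} if has_static_query else {})}
    # -> {"tool_type": "context_grounding", "static_query": "q"}
    # with has_static_query = False, the result is just {"tool_type": "context_grounding"}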

@@ -254,6 +268,7 @@ class StaticBatchTransformSchemaModel(BaseModel):
input_schema=input_model.model_json_schema(),
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn(
destination_path: str = "output.csv",
@@ -291,6 +306,7 @@ class DynamicBatchTransformSchemaModel(BaseModel):
input_schema=input_model.model_json_schema(),
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for context.
recording=False,
)
async def context_tool_fn(
query: str, destination_path: str = "output.csv"
@@ -315,8 +331,15 @@ async def context_tool_fn(
coroutine=context_tool_fn,
output_type=output_model,
metadata={
"tool_type": "context",
"tool_type": "context_grounding",
"display_name": resource.name,
"retrieval_mode": "BatchTransform",
"output_columns": [
{"name": col.name, "description": col.description}
for col in batch_transform_output_columns
],
"web_search_grounding": enable_web_search_grounding,
**({"static_query": static_prompt} if is_static_query(resource) else {}),
},
)

@@ -36,6 +36,38 @@
from uipath_langchain.agent.tools.tool_node import ToolWrapperReturnType
from uipath_langchain.agent.tools.utils import sanitize_tool_name

# Define the output schema with job-attachment
BATCH_TRANSFORM_OUTPUT_SCHEMA = {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/job-attachment",
"description": "The transformed result file as an attachment",
}
},
"required": ["result"],
"definitions": {
"job-attachment": {
"type": "object",
"properties": {
"ID": {"type": "string", "description": "Orchestrator attachment key"},
"FullName": {"type": "string", "description": "File name"},
"MimeType": {
"type": "string",
"description": "The MIME type of the content",
},
"Metadata": {
"type": "object",
"description": "Dictionary<string, string> of metadata",
"additionalProperties": {"type": "string"},
},
},
"required": ["ID", "FullName", "MimeType"],
"x-uipath-resource-kind": "JobAttachment",
}
},
}


def create_batch_transform_tool(
resource: AgentInternalToolResourceConfig, llm: BaseChatModel
@@ -84,7 +116,8 @@ def create_batch_transform_tool(

# Create input model from modified schema
input_model = create_model(input_schema)
output_model = create_model(resource.output_schema)
# Create output model from schema with job-attachment definition
output_model = create_model(BATCH_TRANSFORM_OUTPUT_SCHEMA)

async def batch_transform_tool_fn(**kwargs: Any) -> dict[str, Any]:
query = kwargs.get("query") if not is_query_static else static_query
@@ -144,7 +177,10 @@ async def create_ephemeral_index():
)
)

return await invoke_batch_transform()
result_attachment = await invoke_batch_transform()

# The resume trigger returns the attachment info directly
return {"result": result_attachment}

# Import here to avoid circular dependency
from uipath_langchain.agent.wrappers import get_job_attachment_wrapper
@@ -167,10 +203,17 @@ async def batch_transform_tool_wrapper(
output_type=output_model,
argument_properties=resource.argument_properties,
metadata={
"tool_type": resource.type.lower(),
"tool_type": "context_grounding",
"display_name": tool_name,
"args_schema": input_model,
"output_schema": output_model,
"retrieval_mode": "BatchTransform",
Contributor Author (inline review comment): nit: metadata creation can be extracted into a common helper function to avoid duplication across multiple tools...

"output_columns": [
{"name": col.name, "description": col.description}
for col in batch_transform_output_columns
],
"web_search_grounding": static_web_search,
**({"static_query": static_query} if is_query_static else {}),
},
)
tool.set_tool_wrappers(awrapper=batch_transform_tool_wrapper)
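Picking up the reviewer's nit above: the metadata assembled here, in context_tool.py, and in the deep-RAG tool differs only in a few mode-specific keys, so it could come from one shared helper. A rough sketch under that assumption; build_context_grounding_metadata and its signature are hypothetical, not part of this PR:

    from typing import Any


    def build_context_grounding_metadata(
        *,
        display_name: str,
        retrieval_mode: str,
        static_query: str | None = None,
        **extra: Any,
    ) -> dict[str, Any]:
        """Assemble the metadata dict shared by the context-grounding tools.

        `extra` carries mode-specific keys such as citation_mode,
        number_of_results, output_columns, or web_search_grounding.
        """
        metadata: dict[str, Any] = {
            "tool_type": "context_grounding",
            "display_name": display_name,
            "retrieval_mode": retrieval_mode,
            **extra,
        }
        if static_query is not None:
            metadata["static_query"] = static_query
        return metadata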
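For reference, a value conforming to the BATCH_TRANSFORM_OUTPUT_SCHEMA introduced at the top of this file would look roughly like the following; the attachment key, file name, and metadata values are invented for illustration:

    {
        "result": {
            "ID": "8f14e45f-ceea-467f-a0e6-b2f0c3d4e5a6",  # Orchestrator attachment key (hypothetical)
            "FullName": "output.csv",
            "MimeType": "text/csv",
            "Metadata": {"rowCount": "128"},  # optional string-to-string metadata
        }
    }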
@@ -96,6 +96,7 @@ async def deeprag_tool_fn(**kwargs: Any) -> dict[str, Any]:
input_schema=input_model.model_json_schema() if input_model else None,
output_schema=output_model.model_json_schema(),
example_calls=[], # Examples cannot be provided for internal tools
recording=False,
)
async def invoke_deeprag():
@task
@@ -150,10 +151,13 @@ async def deeprag_tool_wrapper(
output_type=output_model,
argument_properties=resource.argument_properties,
metadata={
"tool_type": resource.type.lower(),
"tool_type": "context_grounding",
"display_name": tool_name,
"args_schema": input_model,
"output_schema": output_model,
"retrieval_mode": "DeepRag",
"citation_mode": citation_mode.value,
**({"static_query": static_query} if is_query_static else {}),
},
)
tool.set_tool_wrappers(awrapper=deeprag_tool_wrapper)
2 changes: 1 addition & 1 deletion tests/agent/wrappers/test_job_attachment_wrapper.py
@@ -43,7 +43,7 @@ def assert_command_success(
result: Command[Any],
tool_call_id: str = "call_123",
job_attachments: dict[str, Any] | None = None,
expected_content: str | None = "{'result': 'success'}",
expected_content: str | None = '{"result": "success"}',
):
"""Assert that result is a successful Command with expected structure."""
if job_attachments is None:
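The updated expectation presumably reflects the wrapper now serializing tool output as JSON (e.g. via json.dumps) rather than stringifying the Python dict; a minimal illustration of the difference:

    import json

    payload = {"result": "success"}
    str(payload)         # "{'result': 'success'}" -- Python repr, single quotes (old expectation)
    json.dumps(payload)  # '{"result": "success"}' -- valid JSON, double quotes (new expectation)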