diff --git a/docs/interrupt_models.md b/docs/interrupt_models.md
index f0d32f4c..827575ee 100644
--- a/docs/interrupt_models.md
+++ b/docs/interrupt_models.md
@@ -9,7 +9,9 @@ UiPath platform and the Langchain coded agents.
 
 ### 1. InvokeProcess
 
-The `InvokeProcess` model is utilized to invoke a process within UiPath cloud platform. Upon completion of the invoked process, the current agent will automatically resume execution.
+The `InvokeProcess` model is used to invoke a process within the UiPath cloud platform.
+This process can be of various types, including API workflows, agents, or RPA automations.
+Upon completion of the invoked process, the current agent will automatically resume execution.
 
 #### Attributes:
 - **name** (str): The name of the process to invoke.
@@ -20,6 +22,8 @@ The `InvokeProcess` model is utilized to invoke a process within UiPath cloud pl
 process_output = interrupt(InvokeProcess(name="MyProcess", input_arguments={"arg1": "value1"}))
 ```
 
+For a practical implementation of the `InvokeProcess` model, see the sample usage in the [planner.py](../../samples/multi-agent-planner-researcher-coder-distributed/src/multi-agent-distributed/planner.py#L184) file. This example demonstrates how to invoke a process with dynamic input arguments, showing how the interrupt functionality fits into a multi-agent system, or one where an agent integrates with RPA processes and API workflows.
+
 ---
 
 ### 2. WaitJob
diff --git a/docs/sample_images/coder-agent-package-overview.png b/docs/sample_images/coder-agent-package-overview.png
new file mode 100644
index 00000000..a778f1f3
Binary files /dev/null and b/docs/sample_images/coder-agent-package-overview.png differ
diff --git a/docs/sample_images/coder-agent-process-configuration.png b/docs/sample_images/coder-agent-process-configuration.png
new file mode 100644
index 00000000..522fa08d
Binary files /dev/null and b/docs/sample_images/coder-agent-process-configuration.png differ
diff --git a/docs/sample_images/planner-agent-package-overview.png b/docs/sample_images/planner-agent-package-overview.png
new file mode 100644
index 00000000..ceefc873
Binary files /dev/null and b/docs/sample_images/planner-agent-package-overview.png differ
diff --git a/docs/sample_images/planner-agent-process-configuration.png b/docs/sample_images/planner-agent-process-configuration.png
new file mode 100644
index 00000000..6724fc6f
Binary files /dev/null and b/docs/sample_images/planner-agent-process-configuration.png differ
diff --git a/docs/sample_images/researcher-agent-package-overview.png b/docs/sample_images/researcher-agent-package-overview.png
new file mode 100644
index 00000000..37d2c6ca
Binary files /dev/null and b/docs/sample_images/researcher-agent-package-overview.png differ
diff --git a/docs/sample_images/researcher-agent-process-configuration.png b/docs/sample_images/researcher-agent-process-configuration.png
new file mode 100644
index 00000000..d00a92f6
Binary files /dev/null and b/docs/sample_images/researcher-agent-process-configuration.png differ
diff --git a/samples/multi-agent-planner-researcher-coder-distributed/README.md b/samples/multi-agent-planner-researcher-coder-distributed/README.md
index 5c7195f9..d1142b0b 100644
--- a/samples/multi-agent-planner-researcher-coder-distributed/README.md
+++ b/samples/multi-agent-planner-researcher-coder-distributed/README.md
@@ -1,18 +1,18 @@
 # Multi-Agent Task Execution System
 
-This repository contains a multi-agent system that breaks down complex tasks into discrete steps and 
routes them to specialized agents for execution. The system consists of three main components: +This repository implements a multi-agent system that decomposes complex tasks into discrete steps, routing them to specialized agents for execution. The system comprises three main components: -1. **Planner agent**: Orchestrates the workflow by planning task execution and routing subtasks to worker agents -2. **Researcher Agent**: Finds information, formulas, and reference material without performing calculations -3. **Coder Agent**: Performs calculations and evaluates formulas with specific values +1. **Planner Agent**: Orchestrates the workflow by planning task execution and routing subtasks to worker agents. +2. **Researcher Agent**: Gathers information, formulas, and reference materials without performing calculations. +3. **Coder Agent**: Executes calculations and evaluates formulas with specific values. -Each agent functions as an independent entrypoint and can be deployed as a separate process, while still being packaged together as part of an Orchestrator Agent Package. +Each agent operates as an independent entry point and can be deployed as a separate process, while still being packaged together as part of an Orchestrator Agent Package. ## System Architecture -The system uses LangGraph to create a directed graph of agents that can communicate and pass state to each other. +The system utilizes LangGraph to create a directed graph of agents that can communicate and share state. -### Planner Graph +### Planner Agent Graph ```mermaid --- config: @@ -34,7 +34,7 @@ graph TD; classDef last fill:#bfb6fc ``` -### Researcher Agent +### Researcher Agent Graph ```mermaid --- config: @@ -61,7 +61,7 @@ graph TD; classDef last fill:#bfb6fc ``` -### Coder Agent +### Coder Agent Graph ```mermaid --- config: @@ -91,20 +91,20 @@ graph TD; ## Agent Responsibilities - **Planner Agent**: - - Takes user questions and creates execution plans - - Routes tasks to appropriate worker agents - - Manages the execution flow and state tracking - - Returns final results to the user + - Takes user questions and formulates execution plans. + - Routes tasks to appropriate worker agents. + - Manages execution flow and state tracking. + - Returns final results to the user. - **Researcher Agent**: - - Retrieves information using a Tavily search tool - - Provides factual content, definitions, and formulas - - Never performs calculations (strictly enforced) + - Retrieves information using a Tavily search tool. + - Provides factual content, definitions, and formulas. + - Does not perform calculations (strictly enforced). - **Coder Agent**: - - Performs calculations using Python code execution - - Evaluates formulas with specific input values - - Returns precise numerical results + - Executes calculations using Python code. + - Evaluates formulas with specific input values. + - Returns precise numerical results. ## Usage @@ -118,7 +118,7 @@ uipath run planner '{"question": "First, please state the Pythagorean theorem. G ### Debugging Individual Agents -You can debug individual agents by directly invoking them: +You can debug individual agents by invoking them directly: #### Researcher Agent Run the researcher agent with: @@ -136,15 +136,100 @@ uipath run coder '{"messages":[{"content":"Let me help you state the Pythagorean ## Sample Workflow -1. User submits a question about the Pythagorean theorem +1. User submits a question about the Pythagorean theorem. 2. 
Planner creates an execution plan with two steps:
-   - Step 1: Researcher agent retrieves the Pythagorean theorem formula
-   - Step 2: Coder agent applies the formula to calculate the result for a=2, b=3
-3. Planner executes Step 1 by invoking the researcher agent
-4. Researcher agent returns the formula a² + b² = c²
-5. Planner executes Step 2 by invoking the coder agent
-6. Coder agent calculates c = √(2² + 3²) = √(4 + 9) = √13 ≈ 3.606
-7. Planner combines the responses and returns the final answer to the user
+   - Step 1: Researcher agent retrieves the Pythagorean theorem formula.
+   - Step 2: Coder agent applies the formula to calculate the result for a=2, b=3.
+3. Planner executes Step 1 by invoking the researcher agent.
+4. Researcher agent returns the formula a² + b² = c².
+5. Planner executes Step 2 by invoking the coder agent.
+6. Coder agent calculates c = √(2² + 3²) = √(4 + 9) = √13 ≈ 3.606.
+7. Planner combines the responses and returns the final answer to the user.
+
+## Steps to Execute the Project on the UiPath Cloud Platform
+
+1. **Clone the Repository**
+   ```bash
+   git clone https://github.com/UiPath/uipath-langchain-python.git
+   ```
+
+2. **Navigate to the Sample Directory**
+   - **Windows:**
+     ```bash
+     cd .\uipath-langchain-python\samples\multi-agent-planner-researcher-coder-distributed
+     ```
+
+   - **Unix-like Systems (Linux, macOS):**
+     ```bash
+     cd ./uipath-langchain-python/samples/multi-agent-planner-researcher-coder-distributed
+     ```
+
+3. **Create and Activate a Python Virtual Environment**
+   ```bash
+   pip install uv
+   uv venv -p 3.11 .venv
+   .venv\Scripts\activate  # Windows
+   source .venv/bin/activate  # Unix-like Systems
+   uv sync
+   ```
+
+4. **Authenticate with the UiPath Cloud Platform**
+   ```bash
+   uipath auth
+   ```
+   > **Note:** After successful authentication in the browser, select the tenant for publishing the agent package.
+   ```
+   Received log data
+   Received authentication information
+   Available tenants:
+     0: cosmin
+     1: DefaultTenant
+     2: Demo
+     3: lucian
+     4: Solutions
+     5: SolutionsTest
+     6: Test
+     7: TestRoles
+   Select tenant: 2
+   ```
+
+5. **Package and Publish Agents**
+   ```bash
+   uipath pack
+   uipath publish
+   ```
+   > **Note:** You will need to select the feed for publishing:
+   ```
+   Select feed type:
+     0: Tenant package feed
+     1: Personal workspace
+   Select feed: 0
+   ```
+
+6. **Create Agent Processes in Orchestrator**
+   - **Planner Agent**
+     ![planner-agent-package-overview](../../docs/sample_images/planner-agent-package-overview.png)
+     ![planner-agent-process-configuration](../../docs/sample_images/planner-agent-process-configuration.png)
+
+   - **Researcher Agent**
+     ![researcher-agent-package-overview](../../docs/sample_images/researcher-agent-package-overview.png)
+     ![researcher-agent-process-configuration](../../docs/sample_images/researcher-agent-process-configuration.png)
+
+   - **Coder Agent**
+     ![coder-agent-package-overview](../../docs/sample_images/coder-agent-package-overview.png)
+     ![coder-agent-process-configuration](../../docs/sample_images/coder-agent-process-configuration.png)
+
+   > **Note:** Ensure that the display names for the coder and researcher agent processes are *coder-agent* and *researcher-agent*, respectively.
+
+7. **Run the Planner Agent with Any Input Question**
+   > **Tip:** For a five-step action plan, consider using the following input:
+   ```
+   Could you find a Python solution for the N-Queens puzzle for N=8? Please analyze why this solution works, considering the key programming concepts it employs. 
+ Then, revise the solution to handle a dynamic value of N, where N is any positive integer. + After that, research the time and space complexity of this new solution. + Lastly, demonstrate this revised Python solution with N=10. + ``` ## Implementation Details diff --git a/samples/multi-agent-planner-researcher-coder-distributed/pyproject.toml b/samples/multi-agent-planner-researcher-coder-distributed/pyproject.toml index ae5483f8..92ecd49e 100644 --- a/samples/multi-agent-planner-researcher-coder-distributed/pyproject.toml +++ b/samples/multi-agent-planner-researcher-coder-distributed/pyproject.toml @@ -3,7 +3,7 @@ name = "multi-agents-distributed" version = "0.0.1" description = "Supervisor agent that coordinates between a researcher and a coder" authors = [ - { name = "Radu Mocanu" } + { name = "Radu Mocanu", email = "radu.mocanu@uipath.com" } ] requires-python = ">=3.10" dependencies = [ diff --git a/samples/multi-agent-planner-researcher-coder-distributed/uipath.json b/samples/multi-agent-planner-researcher-coder-distributed/uipath.json new file mode 100644 index 00000000..2f247ba9 --- /dev/null +++ b/samples/multi-agent-planner-researcher-coder-distributed/uipath.json @@ -0,0 +1,2793 @@ +{ + "entryPoints": [ + { + "filePath": "planner", + "uniqueId": "86baba5d-1eb6-4516-8352-2364c111605c", + "type": "agent", + "input": { + "type": "object", + "properties": { + "question": { + "title": "Question", + "type": "string" + } + }, + "required": [ + "question" + ] + }, + "output": { + "type": "object", + "properties": { + "answer": { + "title": "Answer", + "type": "string" + } + }, + "required": [ + "answer" + ] + } + }, + { + "filePath": "researcher", + "uniqueId": "cc9eb8e7-2484-4d97-aa93-df72e264d779", + "type": "agent", + "input": { + "type": "object", + "properties": { + "messages": { + "items": { + "oneOf": [ + { + "additionalProperties": true, + "description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "ai", + "default": "ai", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + }, + "tool_calls": { + "default": [], + "items": { + "description": "Represents a request to call a tool.\n\nExample:\n\n .. 
code-block:: python\n\n {\n \"name\": \"foo\",\n \"args\": {\"a\": 1},\n \"id\": \"123\"\n }\n\n This represents a request to call the tool named \"foo\" with arguments {\"a\": 1}\n and an identifier of \"123\".", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "args": { + "additionalProperties": true, + "title": "Args", + "type": "object" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "type": { + "const": "tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id" + ], + "title": "ToolCall", + "type": "object" + }, + "title": "Tool Calls", + "type": "array" + }, + "invalid_tool_calls": { + "default": [], + "items": { + "description": "Allowance for errors made by LLM.\n\nHere we add an `error` key to surface errors made during generation\n(e.g., invalid JSON arguments.)", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Error" + }, + "type": { + "const": "invalid_tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "error" + ], + "title": "InvalidToolCall", + "type": "object" + }, + "title": "Invalid Tool Calls", + "type": "array" + }, + "usage_metadata": { + "anyOf": [ + { + "description": "Usage metadata for a message, such as token counts.\n\nThis is a standard representation of token usage that is consistent across models.\n\nExample:\n\n .. code-block:: python\n\n {\n \"input_tokens\": 350,\n \"output_tokens\": 240,\n \"total_tokens\": 590,\n \"input_token_details\": {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n },\n \"output_token_details\": {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n }\n\n.. versionchanged:: 0.3.9\n\n Added ``input_token_details`` and ``output_token_details``.", + "properties": { + "input_tokens": { + "title": "Input Tokens", + "type": "integer" + }, + "output_tokens": { + "title": "Output Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer" + }, + "input_token_details": { + "description": "Breakdown of input token counts.\n\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n }\n\n.. versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "cache_creation": { + "title": "Cache Creation", + "type": "integer" + }, + "cache_read": { + "title": "Cache Read", + "type": "integer" + } + }, + "title": "InputTokenDetails", + "type": "object" + }, + "output_token_details": { + "description": "Breakdown of output token counts.\n\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n\n.. 
versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "reasoning": { + "title": "Reasoning", + "type": "integer" + } + }, + "title": "OutputTokenDetails", + "type": "object" + } + }, + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "UsageMetadata", + "type": "object" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "content" + ], + "title": "AIMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content=\"You are a helpful assistant! Your name is Bob.\"\n ),\n HumanMessage(\n content=\"What is your name?\"\n )\n ]\n\n # Instantiate a chat model and invoke it with the messages\n model = ...\n print(model.invoke(messages))", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "human", + "default": "human", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + } + }, + "required": [ + "content" + ], + "title": "HumanMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message that can be assigned an arbitrary speaker (i.e. role).", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "chat", + "default": "chat", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "role": { + "title": "Role", + "type": "string" + } + }, + "required": [ + "content", + "role" + ], + "title": "ChatMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content=\"You are a helpful assistant! 
Your name is Bob.\"\n ),\n HumanMessage(\n content=\"What is your name?\"\n )\n ]\n\n # Define a chat model and invoke it with the messages\n print(model.invoke(messages))", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "system", + "default": "system", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content" + ], + "title": "SystemMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "function", + "default": "function", + "title": "Type", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content", + "name" + ], + "title": "FunctionMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A ToolMessage representing a result of 42 from a tool call with id\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\n\nExample: A ToolMessage where only part of the tool output is sent to the model\n and the full output is passed in to artifact.\n\n .. versionadded:: 0.2.17\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n tool_output = {\n \"stdout\": \"From the graph we can see that the correlation between x and y is ...\",\n \"stderr\": None,\n \"artifacts\": {\"type\": \"image\", \"base64_data\": \"/9j/4gIcSU...\"},\n }\n\n ToolMessage(\n content=tool_output[\"stdout\"],\n artifact=tool_output,\n tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',\n )\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. 
This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "tool", + "default": "tool", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "tool_call_id": { + "title": "Tool Call Id", + "type": "string" + }, + "artifact": { + "default": null, + "title": "Artifact" + }, + "status": { + "default": "success", + "enum": [ + "success", + "error" + ], + "title": "Status", + "type": "string" + } + }, + "required": [ + "content", + "tool_call_id" + ], + "title": "ToolMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message chunk from an AI.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "AIMessageChunk", + "default": "AIMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + }, + "tool_calls": { + "default": [], + "items": { + "description": "Represents a request to call a tool.\n\nExample:\n\n .. 
code-block:: python\n\n {\n \"name\": \"foo\",\n \"args\": {\"a\": 1},\n \"id\": \"123\"\n }\n\n This represents a request to call the tool named \"foo\" with arguments {\"a\": 1}\n and an identifier of \"123\".", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "args": { + "additionalProperties": true, + "title": "Args", + "type": "object" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "type": { + "const": "tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id" + ], + "title": "ToolCall", + "type": "object" + }, + "title": "Tool Calls", + "type": "array" + }, + "invalid_tool_calls": { + "default": [], + "items": { + "description": "Allowance for errors made by LLM.\n\nHere we add an `error` key to surface errors made during generation\n(e.g., invalid JSON arguments.)", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Error" + }, + "type": { + "const": "invalid_tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "error" + ], + "title": "InvalidToolCall", + "type": "object" + }, + "title": "Invalid Tool Calls", + "type": "array" + }, + "usage_metadata": { + "anyOf": [ + { + "description": "Usage metadata for a message, such as token counts.\n\nThis is a standard representation of token usage that is consistent across models.\n\nExample:\n\n .. code-block:: python\n\n {\n \"input_tokens\": 350,\n \"output_tokens\": 240,\n \"total_tokens\": 590,\n \"input_token_details\": {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n },\n \"output_token_details\": {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n }\n\n.. versionchanged:: 0.3.9\n\n Added ``input_token_details`` and ``output_token_details``.", + "properties": { + "input_tokens": { + "title": "Input Tokens", + "type": "integer" + }, + "output_tokens": { + "title": "Output Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer" + }, + "input_token_details": { + "description": "Breakdown of input token counts.\n\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n }\n\n.. versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "cache_creation": { + "title": "Cache Creation", + "type": "integer" + }, + "cache_read": { + "title": "Cache Read", + "type": "integer" + } + }, + "title": "InputTokenDetails", + "type": "object" + }, + "output_token_details": { + "description": "Breakdown of output token counts.\n\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n\n.. 
versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "reasoning": { + "title": "Reasoning", + "type": "integer" + } + }, + "title": "OutputTokenDetails", + "type": "object" + } + }, + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "UsageMetadata", + "type": "object" + }, + { + "type": "null" + } + ], + "default": null + }, + "tool_call_chunks": { + "default": [], + "items": { + "description": "A chunk of a tool call (e.g., as part of a stream).\n\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\nall string attributes are concatenated. Chunks are only merged if their\nvalues of `index` are equal and not None.\n\nExample:\n\n.. code-block:: python\n\n left_chunks = [ToolCallChunk(name=\"foo\", args='{\"a\":', index=0)]\n right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]\n\n (\n AIMessageChunk(content=\"\", tool_call_chunks=left_chunks)\n + AIMessageChunk(content=\"\", tool_call_chunks=right_chunks)\n ).tool_call_chunks == [ToolCallChunk(name='foo', args='{\"a\":1}', index=0)]", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Index" + }, + "type": { + "const": "tool_call_chunk", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "index" + ], + "title": "ToolCallChunk", + "type": "object" + }, + "title": "Tool Call Chunks", + "type": "array" + } + }, + "required": [ + "content" + ], + "title": "AIMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Human Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "HumanMessageChunk", + "default": "HumanMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + } + }, + "required": [ + "content" + ], + "title": "HumanMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Chat Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { 
+ "const": "ChatMessageChunk", + "default": "ChatMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "role": { + "title": "Role", + "type": "string" + } + }, + "required": [ + "content", + "role" + ], + "title": "ChatMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "System Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "SystemMessageChunk", + "default": "SystemMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content" + ], + "title": "SystemMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Function Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "FunctionMessageChunk", + "default": "FunctionMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content", + "name" + ], + "title": "FunctionMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Tool Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "ToolMessageChunk", + "default": "ToolMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "tool_call_id": { + "title": "Tool Call Id", + "type": "string" + }, + "artifact": { + "default": null, + "title": "Artifact" + }, + 
"status": { + "default": "success", + "enum": [ + "success", + "error" + ], + "title": "Status", + "type": "string" + } + }, + "required": [ + "content", + "tool_call_id" + ], + "title": "ToolMessageChunk", + "type": "object" + } + ] + }, + "title": "Messages", + "type": "array" + } + }, + "required": [ + "messages" + ] + }, + "output": { + "type": "object", + "properties": { + "answer": { + "title": "Answer", + "type": "string" + } + }, + "required": [ + "answer" + ] + } + }, + { + "filePath": "coder", + "uniqueId": "ef2df077-e0f3-4944-b3ea-97a505c65add", + "type": "agent", + "input": { + "type": "object", + "properties": { + "messages": { + "items": { + "oneOf": [ + { + "additionalProperties": true, + "description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "ai", + "default": "ai", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + }, + "tool_calls": { + "default": [], + "items": { + "description": "Represents a request to call a tool.\n\nExample:\n\n .. 
code-block:: python\n\n {\n \"name\": \"foo\",\n \"args\": {\"a\": 1},\n \"id\": \"123\"\n }\n\n This represents a request to call the tool named \"foo\" with arguments {\"a\": 1}\n and an identifier of \"123\".", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "args": { + "additionalProperties": true, + "title": "Args", + "type": "object" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "type": { + "const": "tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id" + ], + "title": "ToolCall", + "type": "object" + }, + "title": "Tool Calls", + "type": "array" + }, + "invalid_tool_calls": { + "default": [], + "items": { + "description": "Allowance for errors made by LLM.\n\nHere we add an `error` key to surface errors made during generation\n(e.g., invalid JSON arguments.)", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Error" + }, + "type": { + "const": "invalid_tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "error" + ], + "title": "InvalidToolCall", + "type": "object" + }, + "title": "Invalid Tool Calls", + "type": "array" + }, + "usage_metadata": { + "anyOf": [ + { + "description": "Usage metadata for a message, such as token counts.\n\nThis is a standard representation of token usage that is consistent across models.\n\nExample:\n\n .. code-block:: python\n\n {\n \"input_tokens\": 350,\n \"output_tokens\": 240,\n \"total_tokens\": 590,\n \"input_token_details\": {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n },\n \"output_token_details\": {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n }\n\n.. versionchanged:: 0.3.9\n\n Added ``input_token_details`` and ``output_token_details``.", + "properties": { + "input_tokens": { + "title": "Input Tokens", + "type": "integer" + }, + "output_tokens": { + "title": "Output Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer" + }, + "input_token_details": { + "description": "Breakdown of input token counts.\n\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n }\n\n.. versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "cache_creation": { + "title": "Cache Creation", + "type": "integer" + }, + "cache_read": { + "title": "Cache Read", + "type": "integer" + } + }, + "title": "InputTokenDetails", + "type": "object" + }, + "output_token_details": { + "description": "Breakdown of output token counts.\n\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n\n.. 
versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "reasoning": { + "title": "Reasoning", + "type": "integer" + } + }, + "title": "OutputTokenDetails", + "type": "object" + } + }, + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "UsageMetadata", + "type": "object" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "content" + ], + "title": "AIMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content=\"You are a helpful assistant! Your name is Bob.\"\n ),\n HumanMessage(\n content=\"What is your name?\"\n )\n ]\n\n # Instantiate a chat model and invoke it with the messages\n model = ...\n print(model.invoke(messages))", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "human", + "default": "human", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + } + }, + "required": [ + "content" + ], + "title": "HumanMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message that can be assigned an arbitrary speaker (i.e. role).", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "chat", + "default": "chat", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "role": { + "title": "Role", + "type": "string" + } + }, + "required": [ + "content", + "role" + ], + "title": "ChatMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content=\"You are a helpful assistant! 
Your name is Bob.\"\n ),\n HumanMessage(\n content=\"What is your name?\"\n )\n ]\n\n # Define a chat model and invoke it with the messages\n print(model.invoke(messages))", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "system", + "default": "system", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content" + ], + "title": "SystemMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "function", + "default": "function", + "title": "Type", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content", + "name" + ], + "title": "FunctionMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A ToolMessage representing a result of 42 from a tool call with id\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\n\nExample: A ToolMessage where only part of the tool output is sent to the model\n and the full output is passed in to artifact.\n\n .. versionadded:: 0.2.17\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n tool_output = {\n \"stdout\": \"From the graph we can see that the correlation between x and y is ...\",\n \"stderr\": None,\n \"artifacts\": {\"type\": \"image\", \"base64_data\": \"/9j/4gIcSU...\"},\n }\n\n ToolMessage(\n content=tool_output[\"stdout\"],\n artifact=tool_output,\n tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',\n )\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. 
This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "tool", + "default": "tool", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "tool_call_id": { + "title": "Tool Call Id", + "type": "string" + }, + "artifact": { + "default": null, + "title": "Artifact" + }, + "status": { + "default": "success", + "enum": [ + "success", + "error" + ], + "title": "Status", + "type": "string" + } + }, + "required": [ + "content", + "tool_call_id" + ], + "title": "ToolMessage", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Message chunk from an AI.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "AIMessageChunk", + "default": "AIMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + }, + "tool_calls": { + "default": [], + "items": { + "description": "Represents a request to call a tool.\n\nExample:\n\n .. 
code-block:: python\n\n {\n \"name\": \"foo\",\n \"args\": {\"a\": 1},\n \"id\": \"123\"\n }\n\n This represents a request to call the tool named \"foo\" with arguments {\"a\": 1}\n and an identifier of \"123\".", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "args": { + "additionalProperties": true, + "title": "Args", + "type": "object" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "type": { + "const": "tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id" + ], + "title": "ToolCall", + "type": "object" + }, + "title": "Tool Calls", + "type": "array" + }, + "invalid_tool_calls": { + "default": [], + "items": { + "description": "Allowance for errors made by LLM.\n\nHere we add an `error` key to surface errors made during generation\n(e.g., invalid JSON arguments.)", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Error" + }, + "type": { + "const": "invalid_tool_call", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "error" + ], + "title": "InvalidToolCall", + "type": "object" + }, + "title": "Invalid Tool Calls", + "type": "array" + }, + "usage_metadata": { + "anyOf": [ + { + "description": "Usage metadata for a message, such as token counts.\n\nThis is a standard representation of token usage that is consistent across models.\n\nExample:\n\n .. code-block:: python\n\n {\n \"input_tokens\": 350,\n \"output_tokens\": 240,\n \"total_tokens\": 590,\n \"input_token_details\": {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n },\n \"output_token_details\": {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n }\n\n.. versionchanged:: 0.3.9\n\n Added ``input_token_details`` and ``output_token_details``.", + "properties": { + "input_tokens": { + "title": "Input Tokens", + "type": "integer" + }, + "output_tokens": { + "title": "Output Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer" + }, + "input_token_details": { + "description": "Breakdown of input token counts.\n\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"cache_creation\": 200,\n \"cache_read\": 100,\n }\n\n.. versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "cache_creation": { + "title": "Cache Creation", + "type": "integer" + }, + "cache_read": { + "title": "Cache Read", + "type": "integer" + } + }, + "title": "InputTokenDetails", + "type": "object" + }, + "output_token_details": { + "description": "Breakdown of output token counts.\n\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\n\nExample:\n\n .. code-block:: python\n\n {\n \"audio\": 10,\n \"reasoning\": 200,\n }\n\n.. 
versionadded:: 0.3.9", + "properties": { + "audio": { + "title": "Audio", + "type": "integer" + }, + "reasoning": { + "title": "Reasoning", + "type": "integer" + } + }, + "title": "OutputTokenDetails", + "type": "object" + } + }, + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "UsageMetadata", + "type": "object" + }, + { + "type": "null" + } + ], + "default": null + }, + "tool_call_chunks": { + "default": [], + "items": { + "description": "A chunk of a tool call (e.g., as part of a stream).\n\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\nall string attributes are concatenated. Chunks are only merged if their\nvalues of `index` are equal and not None.\n\nExample:\n\n.. code-block:: python\n\n left_chunks = [ToolCallChunk(name=\"foo\", args='{\"a\":', index=0)]\n right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]\n\n (\n AIMessageChunk(content=\"\", tool_call_chunks=left_chunks)\n + AIMessageChunk(content=\"\", tool_call_chunks=right_chunks)\n ).tool_call_chunks == [ToolCallChunk(name='foo', args='{\"a\":1}', index=0)]", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "args": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Args" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Id" + }, + "index": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Index" + }, + "type": { + "const": "tool_call_chunk", + "title": "Type", + "type": "string" + } + }, + "required": [ + "name", + "args", + "id", + "index" + ], + "title": "ToolCallChunk", + "type": "object" + }, + "title": "Tool Call Chunks", + "type": "array" + } + }, + "required": [ + "content" + ], + "title": "AIMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Human Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "HumanMessageChunk", + "default": "HumanMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "example": { + "default": false, + "title": "Example", + "type": "boolean" + } + }, + "required": [ + "content" + ], + "title": "HumanMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Chat Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { 
+ "const": "ChatMessageChunk", + "default": "ChatMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "role": { + "title": "Role", + "type": "string" + } + }, + "required": [ + "content", + "role" + ], + "title": "ChatMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "System Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "SystemMessageChunk", + "default": "SystemMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content" + ], + "title": "SystemMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Function Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "FunctionMessageChunk", + "default": "FunctionMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + } + }, + "required": [ + "content", + "name" + ], + "title": "FunctionMessageChunk", + "type": "object" + }, + { + "additionalProperties": true, + "description": "Tool Message chunk.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "additional_kwargs": { + "additionalProperties": true, + "title": "Additional Kwargs", + "type": "object" + }, + "response_metadata": { + "additionalProperties": true, + "title": "Response Metadata", + "type": "object" + }, + "type": { + "const": "ToolMessageChunk", + "default": "ToolMessageChunk", + "title": "Type", + "type": "string" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Name" + }, + "id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Id" + }, + "tool_call_id": { + "title": "Tool Call Id", + "type": "string" + }, + "artifact": { + "default": null, + "title": "Artifact" + }, + 
"status": { + "default": "success", + "enum": [ + "success", + "error" + ], + "title": "Status", + "type": "string" + } + }, + "required": [ + "content", + "tool_call_id" + ], + "title": "ToolMessageChunk", + "type": "object" + } + ] + }, + "title": "Messages", + "type": "array" + } + }, + "required": [ + "messages" + ] + }, + "output": { + "type": "object", + "properties": { + "answer": { + "title": "Answer", + "type": "string" + } + }, + "required": [ + "answer" + ] + } + } + ], + "bindings": { + "version": "2.0", + "resources": [] + } +} \ No newline at end of file