Merged
10 changes: 5 additions & 5 deletions src/llama_stack_client/lib/agents/agent.py
@@ -188,8 +188,8 @@ def initialize(self) -> None:
         )
         self.agent_id = agentic_system_create_response.agent_id
         for tg in self.agent_config["toolgroups"]:
-            for tool in self.client.tools.list(toolgroup_id=tg):
-                self.builtin_tools[tool.identifier] = tool
+            for tool in self.client.tools.list(toolgroup_id=tg if isinstance(tg, str) else tg.get("name")):
+                self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
 
     def create_session(self, session_name: str) -> str:
         agentic_system_create_session_response = self.client.agents.session.create(
@@ -225,7 +225,7 @@ def _run_tool(self, tool_calls: List[ToolCall]) -> ToolResponseParam:
         if tool_call.tool_name in self.builtin_tools:
             tool_result = self.client.tool_runtime.invoke_tool(
                 tool_name=tool_call.tool_name,
-                kwargs=tool_call.arguments,
+                kwargs={**tool_call.arguments, **self.builtin_tools[tool_call.tool_name]},
             )
             tool_response = ToolResponseParam(
                 call_id=tool_call.call_id,
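
A note on the added `kwargs` expression above: because the stored per-toolgroup args are unpacked after the model-supplied `tool_call.arguments`, the toolgroup value wins whenever the same key appears in both. A minimal sketch of that merge order (the argument names below are illustrative, not taken from this diff):

# Illustrative only: later unpacking overrides earlier keys.
model_arguments = {"query": "Population Guangzhou", "max_results": 3}
toolgroup_args = {"max_results": 10}  # what initialize() would have stored in self.builtin_tools

merged = {**model_arguments, **toolgroup_args}
assert merged == {"query": "Population Guangzhou", "max_results": 10}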
@@ -411,7 +411,7 @@ async def initialize(self) -> None:
         self._agent_id = agentic_system_create_response.agent_id
         for tg in self.agent_config["toolgroups"]:
             for tool in await self.client.tools.list(toolgroup_id=tg):
-                self.builtin_tools[tool.identifier] = tool
+                self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
 
     async def create_session(self, session_name: str) -> str:
         await self.initialize()
@@ -462,7 +462,7 @@ async def _run_tool(self, tool_calls: List[ToolCall]) -> ToolResponseParam:
         if tool_call.tool_name in self.builtin_tools:
             tool_result = await self.client.tool_runtime.invoke_tool(
                 tool_name=tool_call.tool_name,
-                kwargs=tool_call.arguments,
+                kwargs={**tool_call.arguments, **self.builtin_tools[tool_call.tool_name]},
             )
             tool_response = ToolResponseParam(
                 call_id=tool_call.call_id,
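Taken together, these changes let an entry in `agent_config["toolgroups"]` be either a plain toolgroup identifier string or a dict carrying per-toolgroup arguments: `initialize()` looks tools up by `tg` or `tg.get("name")` and stores `tg.get("args", {})`, and `_run_tool()` merges those stored args into every `invoke_tool()` call. A sketch of the configuration side under that reading (the toolgroup names and arg keys below are hypothetical examples, not taken from this PR):

# Hypothetical AgentConfig excerpt showing both toolgroup forms the new code accepts.
agent_config = {
    # ... model, instructions, and other AgentConfig fields omitted ...
    "toolgroups": [
        # Plain string: tools are listed by id and no extra args are stored ({}).
        "builtin::websearch",
        # Dict form: "name" is used for the tools.list() lookup, and "args" is
        # stored per tool and merged into the kwargs of each invoke_tool() call.
        {"name": "builtin::rag", "args": {"vector_db_ids": ["my_documents"]}},
    ],
}
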
14 changes: 7 additions & 7 deletions src/llama_stack_client/lib/agents/react/prompts.py
@@ -22,7 +22,7 @@
 
 The `action` key should specify the $TOOL_NAME the name of the tool to use and the `tool_params` key should specify the parameters key as input to the tool.
 
-Make sure to have the $TOOL_PARAMS as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
+Make sure to have the $TOOL_PARAMS as a list of dictionaries in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
 
 You should always think about one action to take, and have the `thought` key contain your thought process about this action.
 If the tool responds, the tool will return an observation containing result of the action.
@@ -37,7 +37,7 @@
     "thought": "I need to transform the image that I received in the previous observation to make it green.",
     "action": {
         "tool_name": "image_transformer",
-        "tool_params": {"image": "image_1.jpg"}
+        "tool_params": [{"name": "image"}, {"value": "image_1.jpg"}]
     },
     "answer": null
 }
@@ -61,7 +61,7 @@
     "thought": "I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.",
     "action": {
         "tool_name": "document_qa",
-        "tool_params": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"}
+        "tool_params": [{"name": "document"}, {"value": "document.pdf"}, {"name": "question"}, {"value": "Who is the oldest person mentioned?"}]
     },
     "answer": null
 }
@@ -73,7 +73,7 @@
     "thought": "I will now generate an image showcasing the oldest person.",
     "action": {
         "tool_name": "image_generator",
-        "tool_params": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}
+        "tool_params": [{"name": "prompt"}, {"value": "A portrait of John Doe, a 55-year-old man living in Canada."}]
     },
     "answer": null
 }
@@ -93,7 +93,7 @@
     "thought": "I will use python code evaluator to compute the result of the operation and then return the final answer using the `final_answer` tool",
     "action": {
         "tool_name": "python_interpreter",
-        "tool_params": {"code": "5 + 3 + 1294.678"}
+        "tool_params": [{"name": "code"}, {"value": "5 + 3 + 1294.678"}]
     },
     "answer": null
 }
@@ -113,7 +113,7 @@
     "thought": "I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities.",
     "action": {
         "tool_name": "search",
-        "tool_params": {"query": "Population Guangzhou"}
+        "tool_params": [{"name": "query"}, {"value": "Population Guangzhou"}]
     },
     "answer": null
 }
@@ -124,7 +124,7 @@
     "thought": "Now let's get the population of Shanghai using the tool 'search'.",
     "action": {
         "tool_name": "search",
-        "tool_params": {"query": "Population Shanghai"}
+        "tool_params": [{"name": "query"}, {"value": "Population Shanghai"}]
     },
     "answer": null
 }
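For context on the new $TOOL_PARAMS shape: the updated examples emit each parameter as a `{"name": ...}` entry followed by a `{"value": ...}` entry, so whatever consumes the model's output has to pair those entries back up before calling the tool. A minimal sketch of that flattening, assuming exactly the alternating shape shown above (the helper name is illustrative and not part of this PR):

from typing import Any, Dict, List


def tool_params_to_kwargs(tool_params: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Pair each {"name": ...} entry with the {"value": ...} entry that follows it.
    kwargs: Dict[str, Any] = {}
    pending_name = None
    for entry in tool_params:
        if "name" in entry:
            pending_name = entry["name"]
        elif "value" in entry and pending_name is not None:
            kwargs[pending_name] = entry["value"]
            pending_name = None
    return kwargs


# Using the `search` example from the prompt above:
params = [{"name": "query"}, {"value": "Population Guangzhou"}]
assert tool_params_to_kwargs(params) == {"query": "Population Guangzhou"}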