Skip to content

Commit 9af7a2c

Browse files
committed
chore: update agent call
Follow-up on llamastack#3810. Signed-off-by: Sébastien Han <seb@redhat.com>
1 parent f205ab6 commit 9af7a2c

File tree

1 file changed

+1
-28
lines changed

1 file changed

+1
-28
lines changed

tests/integration/agents/test_agents.py

Lines changed: 1 addition & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -91,23 +91,10 @@ def get_boiling_point_with_metadata(liquid_name: str, celcius: bool = True) -> d
9191

9292
@pytest.fixture(scope="session")
9393
def agent_config(llama_stack_client, text_model_id):
94-
available_shields = [shield.identifier for shield in llama_stack_client.shields.list()]
95-
available_shields = available_shields[:1]
9694
agent_config = dict(
9795
model=text_model_id,
9896
instructions="You are a helpful assistant",
99-
sampling_params={
100-
"strategy": {
101-
"type": "top_p",
102-
"temperature": 0.0001,
103-
"top_p": 0.9,
104-
},
105-
"max_tokens": 512,
106-
},
10797
tools=[],
108-
input_shields=available_shields,
109-
output_shields=available_shields,
110-
enable_session_persistence=False,
11198
)
11299
return agent_config
113100

@@ -117,16 +104,7 @@ def agent_config_without_safety(text_model_id):
117104
agent_config = dict(
118105
model=text_model_id,
119106
instructions="You are a helpful assistant",
120-
sampling_params={
121-
"strategy": {
122-
"type": "top_p",
123-
"temperature": 0.0001,
124-
"top_p": 0.9,
125-
},
126-
"max_tokens": 512,
127-
},
128107
tools=[],
129-
enable_session_persistence=False,
130108
)
131109
return agent_config
132110

@@ -146,7 +124,7 @@ def test_agent_simple(llama_stack_client, agent_config):
146124

147125
assert "hello" in logs_str.lower()
148126

149-
if len(agent_config["input_shields"]) > 0:
127+
if "input_shields" in agent_config and len(agent_config.get("input_shields", [])) > 0:
150128
pytest.skip("Shield support not available in new Agent implementation")
151129

152130

@@ -289,7 +267,6 @@ def test_custom_tool_infinite_loop(llama_stack_client, agent_config):
289267
**agent_config,
290268
"instructions": "You are a helpful assistant Always respond with tool calls no matter what. ",
291269
"tools": [client_tool],
292-
"max_infer_iters": 5,
293270
}
294271

295272
agent = build_agent(llama_stack_client, agent_config)
@@ -333,9 +310,7 @@ def run_agent_with_tool_choice(client, agent_config, tool_choice):
333310

334311
test_agent_config = {
335312
**agent_config,
336-
"tool_config": {"tool_choice": tool_choice},
337313
"tools": [client_tool],
338-
"max_infer_iters": 2,
339314
}
340315

341316
agent = build_agent(client, test_agent_config)
@@ -358,8 +333,6 @@ def test_create_turn_response(llama_stack_client, agent_config, client_tools):
358333
client_tool, expects_metadata = client_tools
359334
agent_config = {
360335
**agent_config,
361-
"input_shields": [],
362-
"output_shields": [],
363336
"tools": [client_tool],
364337
}
365338

0 commit comments

Comments
 (0)