diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 51eda4ba..f3a4a10b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.3.0-alpha.3"
+ ".": "0.3.0-alpha.4"
}
diff --git a/.stats.yml b/.stats.yml
index 436151e8..5588dfb4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml
-openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3
-config_hash: 0412cd40c0609550c1a47c69dd104e4f
+configured_endpoints: 108
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml
+openapi_spec_hash: cde1096a830f2081d68f858f020fd53f
+config_hash: 8800bdff1a087b9d5211dda2a7b9f66f
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 05e36acf..76851aac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,28 @@
# Changelog
+## 0.3.0-alpha.4 (2025-10-02)
+
+Full Changelog: [v0.3.0-alpha.3...v0.3.0-alpha.4](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.3...v0.3.0-alpha.4)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** use input_schema instead of parameters for tools
+
+### Features
+
+* **api:** fixes to URLs ([406c366](https://github.com/llamastack/llama-stack-client-python/commit/406c36699f5618b0d2673ab38c93516aa403778f))
+* **api:** tool api (input_schema, etc.) changes ([064b98b](https://github.com/llamastack/llama-stack-client-python/commit/064b98bb38a87ee2c9deb93344409216a389aecd))
+* **api:** use input_schema instead of parameters for tools ([2d53df4](https://github.com/llamastack/llama-stack-client-python/commit/2d53df4f8b44af56019571e4b2db9ab875fb13d3))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([a41fdb1](https://github.com/llamastack/llama-stack-client-python/commit/a41fdb1089f180f612e4fee2204217099c1dddd0))
+* **api:** fix the ToolDefParam updates ([4e24a76](https://github.com/llamastack/llama-stack-client-python/commit/4e24a76a0065b5ebea99a5792389ce9aa0fe7483))
+* **manual:** kill arguments_json ([a05eb61](https://github.com/llamastack/llama-stack-client-python/commit/a05eb6194fd7234420e6237cd34b84d2c859f525))
+* **manual:** update lib/ code for the input_schema changes ([67b3d02](https://github.com/llamastack/llama-stack-client-python/commit/67b3d02467b6cb12d606d0626bbc2b52ae767885))
+* **manual:** use tool.name instead of tool.identifier ([8542d1d](https://github.com/llamastack/llama-stack-client-python/commit/8542d1d2aaff782091ac5dc8c8dac59a0d1a5fa6))
+
## 0.3.0-alpha.3 (2025-09-30)
Full Changelog: [v0.3.0-alpha.2...v0.3.0-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.2...v0.3.0-alpha.3)
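
The headline breaking change replaces the structured `parameters` list on tool definitions with a standard JSON Schema `input_schema`. A minimal migration sketch, with illustrative field values:

```python
# Before (0.3.0-alpha.3): tool inputs were a list of Parameter entries.
old_tool = {
    "name": "get_weather",
    "description": "Get the current weather",
    "parameters": [
        {
            "name": "city",
            "description": "Name of the city",
            "parameter_type": "string",
            "required": True,
        }
    ],
}

# After (0.3.0-alpha.4): tool inputs are a single JSON Schema object.
new_tool = {
    "name": "get_weather",
    "description": "Get the current weather",
    "input_schema": {
        "type": "object",
        "properties": {"city": {"type": "string", "description": "Name of the city"}},
        "required": ["city"],
    },
}
```
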
diff --git a/api.md b/api.md
index 7efc4fae..3319e27c 100644
--- a/api.md
+++ b/api.md
@@ -42,13 +42,13 @@ Methods:
Types:
```python
-from llama_stack_client.types import ListToolsResponse, Tool, ToolListResponse
+from llama_stack_client.types import ToolListResponse
```
Methods:
- client.tools.list(\*\*params) -> ToolListResponse
-- client.tools.get(tool_name) -> Tool
+- client.tools.get(tool_name) -> ToolDef
# ToolRuntime
@@ -118,12 +118,12 @@ from llama_stack_client.types import (
Methods:
-- client.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(dataset_id, \*\*params) -> None
-- client.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
-- client.datasets.register(\*\*params) -> DatasetRegisterResponse
-- client.datasets.unregister(dataset_id) -> None
+- client.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
+- client.datasets.list() -> DatasetListResponse
+- client.datasets.appendrows(dataset_id, \*\*params) -> None
+- client.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
+- client.datasets.register(\*\*params) -> DatasetRegisterResponse
+- client.datasets.unregister(dataset_id) -> None
# Inspect
@@ -406,14 +406,13 @@ from llama_stack_client.types import (
Methods:
-- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse
-- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
-- client.telemetry.get_trace(trace_id) -> Trace
-- client.telemetry.log_event(\*\*params) -> None
-- client.telemetry.query_metrics(metric_name, \*\*params) -> TelemetryQueryMetricsResponse
-- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
-- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
-- client.telemetry.save_spans_to_dataset(\*\*params) -> None
+- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse
+- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
+- client.telemetry.get_trace(trace_id) -> Trace
+- client.telemetry.query_metrics(metric_name, \*\*params) -> TelemetryQueryMetricsResponse
+- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
+- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
+- client.telemetry.save_spans_to_dataset(\*\*params) -> None
# Scoring
@@ -457,9 +456,9 @@ from llama_stack_client.types import Benchmark, ListBenchmarksResponse, Benchmar
Methods:
-- client.benchmarks.retrieve(benchmark_id) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register(\*\*params) -> None
+- client.benchmarks.retrieve(benchmark_id) -> Benchmark
+- client.benchmarks.list() -> BenchmarkListResponse
+- client.benchmarks.register(\*\*params) -> None
# Files
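
With the `Tool` resource type removed, `client.tools.get` now returns a `ToolDef`. A hedged usage sketch, assuming a locally running server and a registered tool name:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

tool = client.tools.get("web_search")  # hypothetical tool name
print(tool.name)          # ToolDef exposes `name`; the old `identifier` field is gone
print(tool.input_schema)  # a JSON Schema dict, replacing the old `parameters` list
```
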
diff --git a/pyproject.toml b/pyproject.toml
index e0d567b3..99c36889 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.3.0-alpha.3"
+version = "0.3.0-alpha.4"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/llama_stack_client/lib/agents/agent.py b/src/llama_stack_client/lib/agents/agent.py
index 779c44c2..5e00a88b 100644
--- a/src/llama_stack_client/lib/agents/agent.py
+++ b/src/llama_stack_client/lib/agents/agent.py
@@ -212,7 +212,7 @@ def initialize(self) -> None:
for tg in self.agent_config["toolgroups"]:
toolgroup_id = tg if isinstance(tg, str) else tg.get("name")
for tool in self.client.tools.list(toolgroup_id=toolgroup_id, extra_headers=self.extra_headers):
- self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
+ self.builtin_tools[tool.name] = tg.get("args", {}) if isinstance(tg, dict) else {}
def create_session(self, session_name: str) -> str:
agentic_system_create_session_response = self.client.alpha.agents.session.create(
@@ -475,7 +475,7 @@ async def initialize(self) -> None:
self._agent_id = agentic_system_create_response.agent_id
for tg in self.agent_config["toolgroups"]:
for tool in await self.client.tools.list(toolgroup_id=tg, extra_headers=self.extra_headers):
- self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
+ self.builtin_tools[tool.name] = tg.get("args", {}) if isinstance(tg, dict) else {}
async def create_session(self, session_name: str) -> str:
await self.initialize()
diff --git a/src/llama_stack_client/lib/agents/client_tool.py b/src/llama_stack_client/lib/agents/client_tool.py
index f017d651..09164361 100644
--- a/src/llama_stack_client/lib/agents/client_tool.py
+++ b/src/llama_stack_client/lib/agents/client_tool.py
@@ -19,9 +19,17 @@
Union,
)
+from typing_extensions import TypedDict
+
from llama_stack_client.types import CompletionMessage, Message
from llama_stack_client.types.alpha import ToolResponse
-from llama_stack_client.types.tool_def_param import Parameter, ToolDefParam
+from llama_stack_client.types.tool_def_param import ToolDefParam
+
+
+class JSONSchema(TypedDict, total=False):
+ type: str
+ properties: Dict[str, Any]
+ required: List[str]
class ClientTool:
@@ -47,28 +55,18 @@ def get_description(self) -> str:
raise NotImplementedError
@abstractmethod
- def get_params_definition(self) -> Dict[str, Parameter]:
+ def get_input_schema(self) -> JSONSchema:
raise NotImplementedError
def get_instruction_string(self) -> str:
return f"Use the function '{self.get_name()}' to: {self.get_description()}"
- def parameters_for_system_prompt(self) -> str:
- return json.dumps(
- {
- "name": self.get_name(),
- "description": self.get_description(),
- "parameters": {name: definition for name, definition in self.get_params_definition().items()},
- }
- )
-
def get_tool_definition(self) -> ToolDefParam:
return ToolDefParam(
name=self.get_name(),
description=self.get_description(),
- parameters=list(self.get_params_definition().values()),
+ input_schema=self.get_input_schema(),
metadata={},
- tool_prompt_format="python_list",
)
def run(
@@ -83,12 +81,7 @@ def run(
metadata = {}
try:
- if tool_call.arguments_json is not None:
- params = json.loads(tool_call.arguments_json)
- elif isinstance(tool_call.arguments, str):
- params = json.loads(tool_call.arguments)
- else:
- params = tool_call.arguments
+ params = json.loads(tool_call.arguments)
response = self.run_impl(**params)
if isinstance(response, dict) and "content" in response:
@@ -148,6 +141,37 @@ def async_run_impl(self, **kwargs):
T = TypeVar("T", bound=Callable)
+def _python_type_to_json_schema_type(type_hint: Any) -> str:
+ """Convert Python type hints to JSON Schema type strings."""
+ # Handle Union types (e.g., Optional[str])
+ origin = get_origin(type_hint)
+ if origin is Union:
+ # Get non-None types from Union
+ args = [arg for arg in get_args(type_hint) if arg is not type(None)]
+ if args:
+ type_hint = args[0] # Use first non-None type
+
+ # Get the actual type if it's a generic
+ if hasattr(type_hint, "__origin__"):
+ type_hint = type_hint.__origin__
+
+ # Map Python types to JSON Schema types
+ type_name = getattr(type_hint, "__name__", str(type_hint))
+
+ type_mapping = {
+ "bool": "boolean",
+ "int": "integer",
+ "float": "number",
+ "str": "string",
+ "list": "array",
+ "dict": "object",
+ "List": "array",
+ "Dict": "object",
+ }
+
+ return type_mapping.get(type_name, "string") # Default to string if unknown
+
+
def client_tool(func: T) -> ClientTool:
"""
Decorator to convert a function into a ClientTool.
@@ -188,13 +212,14 @@ def get_description(self) -> str:
f"No description found for client tool {__name__}. Please provide a RST-style docstring with description and :param tags for each parameter."
)
- def get_params_definition(self) -> Dict[str, Parameter]:
+ def get_input_schema(self) -> JSONSchema:
hints = get_type_hints(func)
# Remove return annotation if present
hints.pop("return", None)
# Get parameter descriptions from docstring
- params = {}
+ properties = {}
+ required = []
sig = inspect.signature(func)
doc = inspect.getdoc(func) or ""
@@ -212,15 +237,20 @@ def get_params_definition(self) -> Dict[str, Parameter]:
param = sig.parameters[name]
is_optional_type = get_origin(type_hint) is Union and type(None) in get_args(type_hint)
is_required = param.default == inspect.Parameter.empty and not is_optional_type
- params[name] = Parameter(
- name=name,
- description=param_doc or f"Parameter {name}",
- parameter_type=type_hint.__name__,
- default=(param.default if param.default != inspect.Parameter.empty else None),
- required=is_required,
- )
- return params
+ properties[name] = {
+ "type": _python_type_to_json_schema_type(type_hint),
+ "description": param_doc,
+ }
+
+ if is_required:
+ required.append(name)
+
+ return {
+ "type": "object",
+ "properties": properties,
+ "required": required,
+ }
def run_impl(self, **kwargs) -> Any:
if inspect.iscoroutinefunction(func):
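
To see what the rewritten `get_input_schema` produces, here is a hedged sketch using the `@client_tool` decorator from this module. The `:param` docstring tags feed the property descriptions, and `Optional` parameters with defaults are left out of `required`:

```python
from typing import Optional

from llama_stack_client.lib.agents.client_tool import client_tool


@client_tool
def add_numbers(a: int, b: Optional[int] = None) -> int:
    """Add two numbers.

    :param a: First addend
    :param b: Optional second addend
    """
    return a + (b or 0)


# get_input_schema() returns a single JSON Schema object instead of the old
# Dict[str, Parameter]; expected shape:
# {
#     "type": "object",
#     "properties": {
#         "a": {"type": "integer", "description": "First addend"},
#         "b": {"type": "integer", "description": "Optional second addend"},
#     },
#     "required": ["a"],
# }
print(add_numbers.get_input_schema())
```
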
diff --git a/src/llama_stack_client/lib/agents/react/agent.py b/src/llama_stack_client/lib/agents/react/agent.py
index d1ca4777..919f0420 100644
--- a/src/llama_stack_client/lib/agents/react/agent.py
+++ b/src/llama_stack_client/lib/agents/react/agent.py
@@ -37,7 +37,7 @@ def get_tool_defs(
{
"name": tool.identifier,
"description": tool.description,
- "parameters": tool.parameters,
+ "input_schema": tool.input_schema,
}
for tool in client.tools.list(toolgroup_id=toolgroup_id)
]
@@ -48,7 +48,7 @@ def get_tool_defs(
{
"name": tool.get_name(),
"description": tool.get_description(),
- "parameters": tool.get_params_definition(),
+ "input_schema": tool.get_input_schema(),
}
for tool in client_tools
]
diff --git a/src/llama_stack_client/lib/agents/react/tool_parser.py b/src/llama_stack_client/lib/agents/react/tool_parser.py
index 76b787dd..a796abac 100644
--- a/src/llama_stack_client/lib/agents/react/tool_parser.py
+++ b/src/llama_stack_client/lib/agents/react/tool_parser.py
@@ -55,8 +55,7 @@ def get_tool_calls(self, output_message: CompletionMessage) -> List[ToolCall]:
ToolCall(
call_id=call_id,
tool_name=tool_name,
- arguments=params,
- arguments_json=json.dumps(params),
+ arguments=json.dumps(params),
)
]
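
With `arguments_json` gone, `ToolCall.arguments` is always a JSON-encoded string: producers serialize with `json.dumps` (as the parser above now does) and consumers decode with `json.loads` (as `ClientTool.run` now does). A small round-trip sketch under those assumptions:

```python
import json

from llama_stack_client.types.shared.tool_call import ToolCall

call = ToolCall(
    call_id="call_1",
    tool_name="get_weather",
    arguments=json.dumps({"city": "Paris"}),  # always a JSON string now
)

params = json.loads(call.arguments)  # -> {"city": "Paris"}
```
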
diff --git a/src/llama_stack_client/resources/benchmarks.py b/src/llama_stack_client/resources/benchmarks.py
index 92b8a0c1..3d33bdcf 100644
--- a/src/llama_stack_client/resources/benchmarks.py
+++ b/src/llama_stack_client/resources/benchmarks.py
@@ -71,7 +71,7 @@ def retrieve(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._get(
- f"/v1/eval/benchmarks/{benchmark_id}",
+ f"/v1alpha/eval/benchmarks/{benchmark_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -90,7 +90,7 @@ def list(
) -> BenchmarkListResponse:
"""List all benchmarks."""
return self._get(
- "/v1/eval/benchmarks",
+ "/v1alpha/eval/benchmarks",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -143,7 +143,7 @@ def register(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- "/v1/eval/benchmarks",
+ "/v1alpha/eval/benchmarks",
body=maybe_transform(
{
"benchmark_id": benchmark_id,
@@ -208,7 +208,7 @@ async def retrieve(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._get(
- f"/v1/eval/benchmarks/{benchmark_id}",
+ f"/v1alpha/eval/benchmarks/{benchmark_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -227,7 +227,7 @@ async def list(
) -> BenchmarkListResponse:
"""List all benchmarks."""
return await self._get(
- "/v1/eval/benchmarks",
+ "/v1alpha/eval/benchmarks",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -280,7 +280,7 @@ async def register(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- "/v1/eval/benchmarks",
+ "/v1alpha/eval/benchmarks",
body=await async_maybe_transform(
{
"benchmark_id": benchmark_id,
diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py
index e3a2af6d..5824287c 100644
--- a/src/llama_stack_client/resources/datasets.py
+++ b/src/llama_stack_client/resources/datasets.py
@@ -74,7 +74,7 @@ def retrieve(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return self._get(
- f"/v1/datasets/{dataset_id}",
+ f"/v1beta/datasets/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -93,7 +93,7 @@ def list(
) -> DatasetListResponse:
"""List all datasets."""
return self._get(
- "/v1/datasets",
+ "/v1beta/datasets",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -134,7 +134,7 @@ def appendrows(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/v1/datasetio/append-rows/{dataset_id}",
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -183,7 +183,7 @@ def iterrows(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return self._get(
- f"/v1/datasetio/iterrows/{dataset_id}",
+ f"/v1beta/datasetio/iterrows/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -253,7 +253,7 @@ def register(
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
- "/v1/datasets",
+ "/v1beta/datasets",
body=maybe_transform(
{
"purpose": purpose,
@@ -296,7 +296,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/datasets/{dataset_id}",
+ f"/v1beta/datasets/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -350,7 +350,7 @@ async def retrieve(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return await self._get(
- f"/v1/datasets/{dataset_id}",
+ f"/v1beta/datasets/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -369,7 +369,7 @@ async def list(
) -> DatasetListResponse:
"""List all datasets."""
return await self._get(
- "/v1/datasets",
+ "/v1beta/datasets",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -410,7 +410,7 @@ async def appendrows(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/v1/datasetio/append-rows/{dataset_id}",
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -459,7 +459,7 @@ async def iterrows(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return await self._get(
- f"/v1/datasetio/iterrows/{dataset_id}",
+ f"/v1beta/datasetio/iterrows/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -529,7 +529,7 @@ async def register(
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
- "/v1/datasets",
+ "/v1beta/datasets",
body=await async_maybe_transform(
{
"purpose": purpose,
@@ -572,7 +572,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/datasets/{dataset_id}",
+ f"/v1beta/datasets/{dataset_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
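
The datasets routes move from `/v1/...` to `/v1beta/...`, while benchmarks (above) and telemetry (below) move to `/v1alpha/...`. Method signatures are untouched, so existing call sites keep working against a server that exposes the new prefixes:

```python
# Same client code as before; these now hit the version-prefixed routes.
datasets = client.datasets.list()              # GET /v1beta/datasets
rows = client.datasets.iterrows("my-dataset")  # GET /v1beta/datasetio/iterrows/{dataset_id}
benchmarks = client.benchmarks.list()          # GET /v1alpha/eval/benchmarks
```
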
diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py
index daafbb50..972b8fdf 100644
--- a/src/llama_stack_client/resources/telemetry.py
+++ b/src/llama_stack_client/resources/telemetry.py
@@ -8,7 +8,6 @@
import httpx
from ..types import (
- telemetry_log_event_params,
telemetry_query_spans_params,
telemetry_query_traces_params,
telemetry_get_span_tree_params,
@@ -28,7 +27,6 @@
from .._wrappers import DataWrapper
from ..types.trace import Trace
from .._base_client import make_request_options
-from ..types.event_param import EventParam
from ..types.query_condition_param import QueryConditionParam
from ..types.telemetry_get_span_response import TelemetryGetSpanResponse
from ..types.telemetry_query_spans_response import TelemetryQuerySpansResponse
@@ -88,7 +86,7 @@ def get_span(
if not span_id:
raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
return self._get(
- f"/v1/telemetry/traces/{trace_id}/spans/{span_id}",
+ f"/v1alpha/telemetry/traces/{trace_id}/spans/{span_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -127,7 +125,7 @@ def get_span_tree(
if not span_id:
raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
return self._post(
- f"/v1/telemetry/spans/{span_id}/tree",
+ f"/v1alpha/telemetry/spans/{span_id}/tree",
body=maybe_transform(
{
"attributes_to_return": attributes_to_return,
@@ -171,57 +169,13 @@ def get_trace(
if not trace_id:
raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
return self._get(
- f"/v1/telemetry/traces/{trace_id}",
+ f"/v1alpha/telemetry/traces/{trace_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Trace,
)
- def log_event(
- self,
- *,
- event: EventParam,
- ttl_seconds: int,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Log an event.
-
- Args:
- event: The event to log.
-
- ttl_seconds: The time to live of the event.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1/telemetry/events",
- body=maybe_transform(
- {
- "event": event,
- "ttl_seconds": ttl_seconds,
- },
- telemetry_log_event_params.TelemetryLogEventParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
def query_metrics(
self,
metric_name: str,
@@ -263,7 +217,7 @@ def query_metrics(
if not metric_name:
raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
return self._post(
- f"/v1/telemetry/metrics/{metric_name}",
+ f"/v1alpha/telemetry/metrics/{metric_name}",
body=maybe_transform(
{
"query_type": query_type,
@@ -316,7 +270,7 @@ def query_spans(
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
- "/v1/telemetry/spans",
+ "/v1alpha/telemetry/spans",
body=maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -370,7 +324,7 @@ def query_traces(
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
- "/v1/telemetry/traces",
+ "/v1alpha/telemetry/traces",
body=maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -426,7 +380,7 @@ def save_spans_to_dataset(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- "/v1/telemetry/spans/export",
+ "/v1alpha/telemetry/spans/export",
body=maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -492,7 +446,7 @@ async def get_span(
if not span_id:
raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
return await self._get(
- f"/v1/telemetry/traces/{trace_id}/spans/{span_id}",
+ f"/v1alpha/telemetry/traces/{trace_id}/spans/{span_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -531,7 +485,7 @@ async def get_span_tree(
if not span_id:
raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
return await self._post(
- f"/v1/telemetry/spans/{span_id}/tree",
+ f"/v1alpha/telemetry/spans/{span_id}/tree",
body=await async_maybe_transform(
{
"attributes_to_return": attributes_to_return,
@@ -575,57 +529,13 @@ async def get_trace(
if not trace_id:
raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
return await self._get(
- f"/v1/telemetry/traces/{trace_id}",
+ f"/v1alpha/telemetry/traces/{trace_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Trace,
)
- async def log_event(
- self,
- *,
- event: EventParam,
- ttl_seconds: int,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Log an event.
-
- Args:
- event: The event to log.
-
- ttl_seconds: The time to live of the event.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1/telemetry/events",
- body=await async_maybe_transform(
- {
- "event": event,
- "ttl_seconds": ttl_seconds,
- },
- telemetry_log_event_params.TelemetryLogEventParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
async def query_metrics(
self,
metric_name: str,
@@ -667,7 +577,7 @@ async def query_metrics(
if not metric_name:
raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
return await self._post(
- f"/v1/telemetry/metrics/{metric_name}",
+ f"/v1alpha/telemetry/metrics/{metric_name}",
body=await async_maybe_transform(
{
"query_type": query_type,
@@ -720,7 +630,7 @@ async def query_spans(
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
- "/v1/telemetry/spans",
+ "/v1alpha/telemetry/spans",
body=await async_maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -774,7 +684,7 @@ async def query_traces(
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
- "/v1/telemetry/traces",
+ "/v1alpha/telemetry/traces",
body=await async_maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -830,7 +740,7 @@ async def save_spans_to_dataset(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- "/v1/telemetry/spans/export",
+ "/v1alpha/telemetry/spans/export",
body=await async_maybe_transform(
{
"attribute_filters": attribute_filters,
@@ -860,9 +770,6 @@ def __init__(self, telemetry: TelemetryResource) -> None:
self.get_trace = to_raw_response_wrapper(
telemetry.get_trace,
)
- self.log_event = to_raw_response_wrapper(
- telemetry.log_event,
- )
self.query_metrics = to_raw_response_wrapper(
telemetry.query_metrics,
)
@@ -890,9 +797,6 @@ def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self.get_trace = async_to_raw_response_wrapper(
telemetry.get_trace,
)
- self.log_event = async_to_raw_response_wrapper(
- telemetry.log_event,
- )
self.query_metrics = async_to_raw_response_wrapper(
telemetry.query_metrics,
)
@@ -920,9 +824,6 @@ def __init__(self, telemetry: TelemetryResource) -> None:
self.get_trace = to_streamed_response_wrapper(
telemetry.get_trace,
)
- self.log_event = to_streamed_response_wrapper(
- telemetry.log_event,
- )
self.query_metrics = to_streamed_response_wrapper(
telemetry.query_metrics,
)
@@ -950,9 +851,6 @@ def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self.get_trace = async_to_streamed_response_wrapper(
telemetry.get_trace,
)
- self.log_event = async_to_streamed_response_wrapper(
- telemetry.log_event,
- )
self.query_metrics = async_to_streamed_response_wrapper(
telemetry.query_metrics,
)
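
`telemetry.log_event` is removed from the sync and async clients along with its request types; this diff adds no replacement endpoint. Callers like the following (field values taken from the deleted tests) must be dropped or migrated:

```python
# No longer available as of this release; this now raises AttributeError:
# client.telemetry.log_event(
#     event={
#         "type": "unstructured_log",
#         "message": "message",
#         "severity": "verbose",
#         "span_id": "span_id",
#         "trace_id": "trace_id",
#         "timestamp": "2019-12-27T18:11:19.117Z",
#     },
#     ttl_seconds=0,
# )

# Read-side telemetry methods remain, e.g.:
trace = client.telemetry.get_trace("trace_id")  # GET /v1alpha/telemetry/traces/{trace_id}
```
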
diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py
index 6d405bed..adbf4402 100644
--- a/src/llama_stack_client/resources/tools.py
+++ b/src/llama_stack_client/resources/tools.py
@@ -18,8 +18,8 @@
async_to_streamed_response_wrapper,
)
from .._wrappers import DataWrapper
-from ..types.tool import Tool
from .._base_client import make_request_options
+from ..types.tool_def import ToolDef
from ..types.tool_list_response import ToolListResponse
__all__ = ["ToolsResource", "AsyncToolsResource"]
@@ -93,7 +93,7 @@ def get(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Tool:
+ ) -> ToolDef:
"""
Get a tool by its name.
@@ -113,7 +113,7 @@ def get(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=Tool,
+ cast_to=ToolDef,
)
@@ -185,7 +185,7 @@ async def get(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Tool:
+ ) -> ToolDef:
"""
Get a tool by its name.
@@ -205,7 +205,7 @@ async def get(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=Tool,
+ cast_to=ToolDef,
)
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 0c3d0f34..2b89de40 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -3,7 +3,6 @@
from __future__ import annotations
from .file import File as File
-from .tool import Tool as Tool
from .model import Model as Model
from .trace import Trace as Trace
from .shared import (
@@ -31,7 +30,6 @@
from .route_info import RouteInfo as RouteInfo
from .scoring_fn import ScoringFn as ScoringFn
from .tool_group import ToolGroup as ToolGroup
-from .event_param import EventParam as EventParam
from .health_info import HealthInfo as HealthInfo
from .vector_store import VectorStore as VectorStore
from .version_info import VersionInfo as VersionInfo
@@ -46,7 +44,6 @@
from .file_create_params import FileCreateParams as FileCreateParams
from .tool_list_response import ToolListResponse as ToolListResponse
from .list_files_response import ListFilesResponse as ListFilesResponse
-from .list_tools_response import ListToolsResponse as ListToolsResponse
from .model_list_response import ModelListResponse as ModelListResponse
from .route_list_response import RouteListResponse as RouteListResponse
from .run_shield_response import RunShieldResponse as RunShieldResponse
@@ -99,7 +96,6 @@
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
from .create_embeddings_response import CreateEmbeddingsResponse as CreateEmbeddingsResponse
from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams
-from .telemetry_log_event_params import TelemetryLogEventParams as TelemetryLogEventParams
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
diff --git a/src/llama_stack_client/types/event_param.py b/src/llama_stack_client/types/event_param.py
deleted file mode 100644
index b26f2916..00000000
--- a/src/llama_stack_client/types/event_param.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union
-from datetime import datetime
-from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = [
- "EventParam",
- "UnstructuredLogEvent",
- "MetricEvent",
- "StructuredLogEvent",
- "StructuredLogEventPayload",
- "StructuredLogEventPayloadSpanStartPayload",
- "StructuredLogEventPayloadSpanEndPayload",
-]
-
-
-class UnstructuredLogEvent(TypedDict, total=False):
- message: Required[str]
- """The log message text"""
-
- severity: Required[Literal["verbose", "debug", "info", "warn", "error", "critical"]]
- """The severity level of the log message"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["unstructured_log"]]
- """Event type identifier set to UNSTRUCTURED_LOG"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-class MetricEvent(TypedDict, total=False):
- metric: Required[str]
- """The name of the metric being measured"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["metric"]]
- """Event type identifier set to METRIC"""
-
- unit: Required[str]
- """The unit of measurement for the metric value"""
-
- value: Required[float]
- """The numeric value of the metric measurement"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-class StructuredLogEventPayloadSpanStartPayload(TypedDict, total=False):
- name: Required[str]
- """Human-readable name describing the operation this span represents"""
-
- type: Required[Literal["span_start"]]
- """Payload type identifier set to SPAN_START"""
-
- parent_span_id: str
- """(Optional) Unique identifier for the parent span, if this is a child span"""
-
-
-class StructuredLogEventPayloadSpanEndPayload(TypedDict, total=False):
- status: Required[Literal["ok", "error"]]
- """The final status of the span indicating success or failure"""
-
- type: Required[Literal["span_end"]]
- """Payload type identifier set to SPAN_END"""
-
-
-StructuredLogEventPayload: TypeAlias = Union[
- StructuredLogEventPayloadSpanStartPayload, StructuredLogEventPayloadSpanEndPayload
-]
-
-
-class StructuredLogEvent(TypedDict, total=False):
- payload: Required[StructuredLogEventPayload]
- """The structured payload data for the log event"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["structured_log"]]
- """Event type identifier set to STRUCTURED_LOG"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-EventParam: TypeAlias = Union[UnstructuredLogEvent, MetricEvent, StructuredLogEvent]
diff --git a/src/llama_stack_client/types/list_tools_response.py b/src/llama_stack_client/types/list_tools_response.py
deleted file mode 100644
index 47f040b5..00000000
--- a/src/llama_stack_client/types/list_tools_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .tool_list_response import ToolListResponse
-
-__all__ = ["ListToolsResponse"]
-
-
-class ListToolsResponse(BaseModel):
- data: ToolListResponse
- """List of tools"""
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index ecd8da4e..daf7f6cf 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -15,6 +15,8 @@
"InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult",
"InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall",
"InputUnionMember1OpenAIResponseInputFunctionToolCallOutput",
+ "InputUnionMember1OpenAIResponseMcpApprovalRequest",
+ "InputUnionMember1OpenAIResponseMcpApprovalResponse",
"InputUnionMember1OpenAIResponseMessage",
"InputUnionMember1OpenAIResponseMessageContentUnionMember1",
"InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -150,6 +152,30 @@ class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, tota
status: str
+class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False):
+ id: Required[str]
+
+ arguments: Required[str]
+
+ name: Required[str]
+
+ server_label: Required[str]
+
+ type: Required[Literal["mcp_approval_request"]]
+
+
+class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False):
+ approval_request_id: Required[str]
+
+ approve: Required[bool]
+
+ type: Required[Literal["mcp_approval_response"]]
+
+ id: str
+
+ reason: str
+
+
class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(
TypedDict, total=False
):
@@ -279,6 +305,8 @@ class InputUnionMember1OpenAIResponseMessage(TypedDict, total=False):
InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall,
InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall,
InputUnionMember1OpenAIResponseInputFunctionToolCallOutput,
+ InputUnionMember1OpenAIResponseMcpApprovalRequest,
+ InputUnionMember1OpenAIResponseMcpApprovalResponse,
InputUnionMember1OpenAIResponseMessage,
]
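
The new input items close the MCP approval loop: the model emits an `mcp_approval_request` output item, and the caller answers on a following turn with an `mcp_approval_response`. A hedged sketch (model and request ids are illustrative):

```python
response = client.responses.create(
    model="my-model",  # assumed model id
    input=[
        {
            "type": "mcp_approval_response",
            "approval_request_id": "apr_123",  # id from a prior mcp_approval_request output
            "approve": True,
        }
    ],
)
```
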
diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py
index ac7ec1b1..dec51231 100644
--- a/src/llama_stack_client/types/response_list_response.py
+++ b/src/llama_stack_client/types/response_list_response.py
@@ -16,6 +16,8 @@
"InputOpenAIResponseOutputMessageFileSearchToolCallResult",
"InputOpenAIResponseOutputMessageFunctionToolCall",
"InputOpenAIResponseInputFunctionToolCallOutput",
+ "InputOpenAIResponseMcpApprovalRequest",
+ "InputOpenAIResponseMcpApprovalResponse",
"InputOpenAIResponseMessage",
"InputOpenAIResponseMessageContentUnionMember1",
"InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -44,6 +46,7 @@
"OutputOpenAIResponseOutputMessageMcpCall",
"OutputOpenAIResponseOutputMessageMcpListTools",
"OutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "OutputOpenAIResponseMcpApprovalRequest",
"Text",
"TextFormat",
"Error",
@@ -127,6 +130,30 @@ class InputOpenAIResponseInputFunctionToolCallOutput(BaseModel):
status: Optional[str] = None
+class InputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
+class InputOpenAIResponseMcpApprovalResponse(BaseModel):
+ approval_request_id: str
+
+ approve: bool
+
+ type: Literal["mcp_approval_response"]
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+
class InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
text: str
"""The text content of the input message"""
@@ -246,6 +273,8 @@ class InputOpenAIResponseMessage(BaseModel):
InputOpenAIResponseOutputMessageFileSearchToolCall,
InputOpenAIResponseOutputMessageFunctionToolCall,
InputOpenAIResponseInputFunctionToolCallOutput,
+ InputOpenAIResponseMcpApprovalRequest,
+ InputOpenAIResponseMcpApprovalResponse,
InputOpenAIResponseMessage,
]
@@ -477,6 +506,18 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
"""Tool call type identifier, always "mcp_list_tools" """
+class OutputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
Output: TypeAlias = Annotated[
Union[
OutputOpenAIResponseMessage,
@@ -485,6 +526,7 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
OutputOpenAIResponseOutputMessageFunctionToolCall,
OutputOpenAIResponseOutputMessageMcpCall,
OutputOpenAIResponseOutputMessageMcpListTools,
+ OutputOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py
index b618ddf5..84a0297b 100644
--- a/src/llama_stack_client/types/response_object.py
+++ b/src/llama_stack_client/types/response_object.py
@@ -28,6 +28,7 @@
"OutputOpenAIResponseOutputMessageMcpCall",
"OutputOpenAIResponseOutputMessageMcpListTools",
"OutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "OutputOpenAIResponseMcpApprovalRequest",
"Text",
"TextFormat",
"Error",
@@ -261,6 +262,18 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
"""Tool call type identifier, always "mcp_list_tools" """
+class OutputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
Output: TypeAlias = Annotated[
Union[
OutputOpenAIResponseMessage,
@@ -269,6 +282,7 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
OutputOpenAIResponseOutputMessageFunctionToolCall,
OutputOpenAIResponseOutputMessageMcpCall,
OutputOpenAIResponseOutputMessageMcpListTools,
+ OutputOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py
index 426e9263..7ec15480 100644
--- a/src/llama_stack_client/types/response_object_stream.py
+++ b/src/llama_stack_client/types/response_object_stream.py
@@ -29,6 +29,7 @@
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListToolsTool",
+ "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest",
"OpenAIResponseObjectStreamResponseOutputItemDone",
"OpenAIResponseObjectStreamResponseOutputItemDoneItem",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage",
@@ -48,6 +49,7 @@
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListToolsTool",
+ "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest",
"OpenAIResponseObjectStreamResponseOutputTextDelta",
"OpenAIResponseObjectStreamResponseOutputTextDone",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta",
@@ -330,6 +332,18 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM
"""Tool call type identifier, always "mcp_list_tools" """
+class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
OpenAIResponseObjectStreamResponseOutputItemAddedItem: TypeAlias = Annotated[
Union[
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage,
@@ -338,6 +352,7 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools,
+ OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
@@ -607,6 +622,18 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe
"""Tool call type identifier, always "mcp_list_tools" """
+class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
OpenAIResponseObjectStreamResponseOutputItemDoneItem: TypeAlias = Annotated[
Union[
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage,
@@ -615,6 +642,7 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools,
+ OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/llama_stack_client/types/responses/input_item_list_response.py
index 714ff703..6862492d 100644
--- a/src/llama_stack_client/types/responses/input_item_list_response.py
+++ b/src/llama_stack_client/types/responses/input_item_list_response.py
@@ -14,6 +14,8 @@
"DataOpenAIResponseOutputMessageFileSearchToolCallResult",
"DataOpenAIResponseOutputMessageFunctionToolCall",
"DataOpenAIResponseInputFunctionToolCallOutput",
+ "DataOpenAIResponseMcpApprovalRequest",
+ "DataOpenAIResponseMcpApprovalResponse",
"DataOpenAIResponseMessage",
"DataOpenAIResponseMessageContentUnionMember1",
"DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -104,6 +106,30 @@ class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel):
status: Optional[str] = None
+class DataOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
+class DataOpenAIResponseMcpApprovalResponse(BaseModel):
+ approval_request_id: str
+
+ approve: bool
+
+ type: Literal["mcp_approval_response"]
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+
class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
text: str
"""The text content of the input message"""
@@ -223,6 +249,8 @@ class DataOpenAIResponseMessage(BaseModel):
DataOpenAIResponseOutputMessageFileSearchToolCall,
DataOpenAIResponseOutputMessageFunctionToolCall,
DataOpenAIResponseInputFunctionToolCallOutput,
+ DataOpenAIResponseMcpApprovalRequest,
+ DataOpenAIResponseMcpApprovalResponse,
DataOpenAIResponseMessage,
]
diff --git a/src/llama_stack_client/types/shared/tool_call.py b/src/llama_stack_client/types/shared/tool_call.py
index b9301d75..a35cd6dd 100644
--- a/src/llama_stack_client/types/shared/tool_call.py
+++ b/src/llama_stack_client/types/shared/tool_call.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
+from typing import Union
from typing_extensions import Literal
from ..._models import BaseModel
@@ -9,18 +9,8 @@
class ToolCall(BaseModel):
- arguments: Union[
- str,
- Dict[
- str,
- Union[
- str, float, bool, List[Union[str, float, bool, None]], Dict[str, Union[str, float, bool, None]], None
- ],
- ],
- ]
+ arguments: str
call_id: str
tool_name: Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]
-
- arguments_json: Optional[str] = None
diff --git a/src/llama_stack_client/types/shared_params/tool_call.py b/src/llama_stack_client/types/shared_params/tool_call.py
index 55d53099..16686e61 100644
--- a/src/llama_stack_client/types/shared_params/tool_call.py
+++ b/src/llama_stack_client/types/shared_params/tool_call.py
@@ -2,34 +2,15 @@
from __future__ import annotations
-from typing import Dict, Union
+from typing import Union
from typing_extensions import Literal, Required, TypedDict
-from ..._types import SequenceNotStr
-
__all__ = ["ToolCall"]
class ToolCall(TypedDict, total=False):
- arguments: Required[
- Union[
- str,
- Dict[
- str,
- Union[
- str,
- float,
- bool,
- SequenceNotStr[Union[str, float, bool, None]],
- Dict[str, Union[str, float, bool, None]],
- None,
- ],
- ],
- ]
- ]
+ arguments: Required[str]
call_id: Required[str]
tool_name: Required[Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]]
-
- arguments_json: str
diff --git a/src/llama_stack_client/types/telemetry_log_event_params.py b/src/llama_stack_client/types/telemetry_log_event_params.py
deleted file mode 100644
index 246b6526..00000000
--- a/src/llama_stack_client/types/telemetry_log_event_params.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .event_param import EventParam
-
-__all__ = ["TelemetryLogEventParams"]
-
-
-class TelemetryLogEventParams(TypedDict, total=False):
- event: Required[EventParam]
- """The event to log."""
-
- ttl_seconds: Required[int]
- """The time to live of the event."""
diff --git a/src/llama_stack_client/types/tool.py b/src/llama_stack_client/types/tool.py
deleted file mode 100644
index a7243b64..00000000
--- a/src/llama_stack_client/types/tool.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Tool", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
-
-
-class Tool(BaseModel):
- description: str
- """Human-readable description of what the tool does"""
-
- identifier: str
-
- parameters: List[Parameter]
- """List of parameters this tool accepts"""
-
- provider_id: str
-
- toolgroup_id: str
- """ID of the tool group this tool belongs to"""
-
- type: Literal["tool"]
- """Type of resource, always 'tool'"""
-
- metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Additional metadata about the tool"""
-
- provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
index 21949b41..4674f832 100644
--- a/src/llama_stack_client/types/tool_def.py
+++ b/src/llama_stack_client/types/tool_def.py
@@ -4,30 +4,7 @@
from .._models import BaseModel
-__all__ = ["ToolDef", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDef"]
class ToolDef(BaseModel):
@@ -37,8 +14,14 @@ class ToolDef(BaseModel):
description: Optional[str] = None
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
- parameters: Optional[List[Parameter]] = None
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+
+ toolgroup_id: Optional[str] = None
+ """(Optional) ID of the tool group this tool belongs to"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
index a50437b2..d14ef6cc 100644
--- a/src/llama_stack_client/types/tool_def_param.py
+++ b/src/llama_stack_client/types/tool_def_param.py
@@ -5,30 +5,7 @@
from typing import Dict, Union, Iterable
from typing_extensions import Required, TypedDict
-__all__ = ["ToolDefParam", "Parameter"]
-
-
-class Parameter(TypedDict, total=False):
- description: Required[str]
- """Human-readable description of what the parameter does"""
-
- name: Required[str]
- """Name of the parameter"""
-
- parameter_type: Required[str]
- """Type of the parameter (e.g., string, integer)"""
-
- required: Required[bool]
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, Iterable[object], object, None]
- """(Optional) Default value for the parameter if not provided"""
-
- items: object
- """Type of the elements when parameter_type is array"""
-
- title: str
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDefParam"]
class ToolDefParam(TypedDict, total=False):
@@ -38,8 +15,14 @@ class ToolDefParam(TypedDict, total=False):
description: str
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
"""(Optional) Additional metadata about the tool"""
- parameters: Iterable[Parameter]
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+
+ toolgroup_id: str
+ """(Optional) ID of the tool group this tool belongs to"""
diff --git a/src/llama_stack_client/types/tool_list_response.py b/src/llama_stack_client/types/tool_list_response.py
index 11750ace..bb6c935f 100644
--- a/src/llama_stack_client/types/tool_list_response.py
+++ b/src/llama_stack_client/types/tool_list_response.py
@@ -3,8 +3,8 @@
from typing import List
from typing_extensions import TypeAlias
-from .tool import Tool
+from .tool_def import ToolDef
__all__ = ["ToolListResponse"]
-ToolListResponse: TypeAlias = List[Tool]
+ToolListResponse: TypeAlias = List[ToolDef]
diff --git a/tests/api_resources/alpha/test_agents.py b/tests/api_resources/alpha/test_agents.py
index d67e8457..075bd478 100644
--- a/tests/api_resources/alpha/test_agents.py
+++ b/tests/api_resources/alpha/test_agents.py
@@ -41,18 +41,10 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
+ "toolgroup_id": "toolgroup_id",
}
],
"enable_session_persistence": True,
@@ -247,18 +239,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
+ "toolgroup_id": "toolgroup_id",
}
],
"enable_session_persistence": True,
diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py
index ea123787..07075c52 100644
--- a/tests/api_resources/test_telemetry.py
+++ b/tests/api_resources/test_telemetry.py
@@ -17,7 +17,6 @@
TelemetryQueryTracesResponse,
TelemetryQueryMetricsResponse,
)
-from llama_stack_client._utils import parse_datetime
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -158,77 +157,6 @@ def test_path_params_get_trace(self, client: LlamaStackClient) -> None:
"",
)
- @parametrize
- def test_method_log_event(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- )
- assert telemetry is None
-
- @parametrize
- def test_method_log_event_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- "attributes": {"foo": "string"},
- },
- ttl_seconds=0,
- )
- assert telemetry is None
-
- @parametrize
- def test_raw_response_log_event(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert telemetry is None
-
- @parametrize
- def test_streaming_response_log_event(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert telemetry is None
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="unsupported query params in java / kotlin")
@parametrize
def test_method_query_metrics(self, client: LlamaStackClient) -> None:
@@ -624,77 +552,6 @@ async def test_path_params_get_trace(self, async_client: AsyncLlamaStackClient)
"",
)
- @parametrize
- async def test_method_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- )
- assert telemetry is None
-
- @parametrize
- async def test_method_log_event_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- "attributes": {"foo": "string"},
- },
- ttl_seconds=0,
- )
- assert telemetry is None
-
- @parametrize
- async def test_raw_response_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert telemetry is None
-
- @parametrize
- async def test_streaming_response_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert telemetry is None
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="unsupported query params in java / kotlin")
@parametrize
async def test_method_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
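
These test deletions track the removal of `telemetry.log_event` from the client itself, so legacy call sites should be dropped rather than updated. A defensive sketch for code that must run against both pre- and post-change clients:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# log_event no longer exists after this change; guard legacy call sites
# instead of assuming the method is present.
if hasattr(client.telemetry, "log_event"):
    pass  # pre-change client: legacy logging path (arguments elided)
else:
    pass  # post-change client: the method is gone; remove the call entirely
```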
diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py
index 3c1f0da4..6fafb9f9 100644
--- a/tests/api_resources/test_tools.py
+++ b/tests/api_resources/test_tools.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Tool, ToolListResponse
+from llama_stack_client.types import ToolDef, ToolListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -54,7 +54,7 @@ def test_method_get(self, client: LlamaStackClient) -> None:
tool = client.tools.get(
"tool_name",
)
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
def test_raw_response_get(self, client: LlamaStackClient) -> None:
@@ -65,7 +65,7 @@ def test_raw_response_get(self, client: LlamaStackClient) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
def test_streaming_response_get(self, client: LlamaStackClient) -> None:
@@ -76,7 +76,7 @@ def test_streaming_response_get(self, client: LlamaStackClient) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -130,7 +130,7 @@ async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
tool = await async_client.tools.get(
"tool_name",
)
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
@@ -141,7 +141,7 @@ async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> No
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = await response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
@@ -152,7 +152,7 @@ async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = await response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
assert cast(Any, response.is_closed) is True
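
End to end, `tools.get` now parses into `ToolDef` as well. A usage sketch mirroring the tests above — the tool name is a placeholder:

```python
from llama_stack_client import LlamaStackClient
from llama_stack_client.types import ToolDef

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

tool = client.tools.get("get_weather")  # placeholder tool name
assert isinstance(tool, ToolDef)

# Raw-response variant, as exercised in the tests:
response = client.tools.with_raw_response.get("get_weather")
assert isinstance(response.parse(), ToolDef)
```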