From 2d53df4f8b44af56019571e4b2db9ab875fb13d3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 1 Oct 2025 21:28:07 +0000
Subject: [PATCH 1/9] feat(api)!: use input_schema instead of parameters for
tools
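
Tool definitions now carry a single JSON Schema for their inputs under
`input_schema` (and, for outputs, `output_schema`), replacing the flat
`parameters` list of per-field metadata. A minimal migration sketch,
assuming a hypothetical `get_weather` tool (the names and fields below
are illustrative, not from a real toolgroup):

    # Before: one entry per parameter
    tool_def = {
        "name": "get_weather",
        "description": "Fetch the current weather for a city",
        "parameters": [
            {
                "name": "city",
                "description": "City to look up",
                "parameter_type": "string",
                "required": True,
            }
        ],
    }

    # After: standard JSON Schema, matching MCP's inputSchema
    tool_def = {
        "name": "get_weather",
        "description": "Fetch the current weather for a city",
        "input_schema": {
            "type": "object",
            "properties": {
                "city": {"type": "string", "description": "City to look up"}
            },
            "required": ["city"],
        },
    }

Array- and nested-object parameters that previously needed the ad-hoc
`items` field can now be expressed with ordinary JSON Schema keywords.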
---
.stats.yml | 4 +-
.../types/response_create_params.py | 28 +++++++++++++
.../types/response_list_response.py | 42 +++++++++++++++++++
.../types/response_object.py | 14 +++++++
.../types/response_object_stream.py | 28 +++++++++++++
.../responses/input_item_list_response.py | 28 +++++++++++++
src/llama_stack_client/types/tool.py | 34 ++++-----------
src/llama_stack_client/types/tool_def.py | 32 +++-----------
.../types/tool_def_param.py | 32 +++-----------
tests/api_resources/alpha/test_agents.py | 26 ++----------
10 files changed, 165 insertions(+), 103 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 436151e8..d9b62ff0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml
-openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5f0f0b99d1b0bf40e00e11f5d134ed13de97799cf2dfea0c8612e2f003584505.yml
+openapi_spec_hash: 5f51544cb340c37aba54b93a526c536e
config_hash: 0412cd40c0609550c1a47c69dd104e4f
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index ecd8da4e..daf7f6cf 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -15,6 +15,8 @@
"InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult",
"InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall",
"InputUnionMember1OpenAIResponseInputFunctionToolCallOutput",
+ "InputUnionMember1OpenAIResponseMcpApprovalRequest",
+ "InputUnionMember1OpenAIResponseMcpApprovalResponse",
"InputUnionMember1OpenAIResponseMessage",
"InputUnionMember1OpenAIResponseMessageContentUnionMember1",
"InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -150,6 +152,30 @@ class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, tota
status: str
+class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False):
+ id: Required[str]
+
+ arguments: Required[str]
+
+ name: Required[str]
+
+ server_label: Required[str]
+
+ type: Required[Literal["mcp_approval_request"]]
+
+
+class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False):
+ approval_request_id: Required[str]
+
+ approve: Required[bool]
+
+ type: Required[Literal["mcp_approval_response"]]
+
+ id: str
+
+ reason: str
+
+
class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(
TypedDict, total=False
):
@@ -279,6 +305,8 @@ class InputUnionMember1OpenAIResponseMessage(TypedDict, total=False):
InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall,
InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall,
InputUnionMember1OpenAIResponseInputFunctionToolCallOutput,
+ InputUnionMember1OpenAIResponseMcpApprovalRequest,
+ InputUnionMember1OpenAIResponseMcpApprovalResponse,
InputUnionMember1OpenAIResponseMessage,
]
diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py
index ac7ec1b1..dec51231 100644
--- a/src/llama_stack_client/types/response_list_response.py
+++ b/src/llama_stack_client/types/response_list_response.py
@@ -16,6 +16,8 @@
"InputOpenAIResponseOutputMessageFileSearchToolCallResult",
"InputOpenAIResponseOutputMessageFunctionToolCall",
"InputOpenAIResponseInputFunctionToolCallOutput",
+ "InputOpenAIResponseMcpApprovalRequest",
+ "InputOpenAIResponseMcpApprovalResponse",
"InputOpenAIResponseMessage",
"InputOpenAIResponseMessageContentUnionMember1",
"InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -44,6 +46,7 @@
"OutputOpenAIResponseOutputMessageMcpCall",
"OutputOpenAIResponseOutputMessageMcpListTools",
"OutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "OutputOpenAIResponseMcpApprovalRequest",
"Text",
"TextFormat",
"Error",
@@ -127,6 +130,30 @@ class InputOpenAIResponseInputFunctionToolCallOutput(BaseModel):
status: Optional[str] = None
+class InputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
+class InputOpenAIResponseMcpApprovalResponse(BaseModel):
+ approval_request_id: str
+
+ approve: bool
+
+ type: Literal["mcp_approval_response"]
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+
class InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
text: str
"""The text content of the input message"""
@@ -246,6 +273,8 @@ class InputOpenAIResponseMessage(BaseModel):
InputOpenAIResponseOutputMessageFileSearchToolCall,
InputOpenAIResponseOutputMessageFunctionToolCall,
InputOpenAIResponseInputFunctionToolCallOutput,
+ InputOpenAIResponseMcpApprovalRequest,
+ InputOpenAIResponseMcpApprovalResponse,
InputOpenAIResponseMessage,
]
@@ -477,6 +506,18 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
"""Tool call type identifier, always "mcp_list_tools" """
+class OutputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
Output: TypeAlias = Annotated[
Union[
OutputOpenAIResponseMessage,
@@ -485,6 +526,7 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
OutputOpenAIResponseOutputMessageFunctionToolCall,
OutputOpenAIResponseOutputMessageMcpCall,
OutputOpenAIResponseOutputMessageMcpListTools,
+ OutputOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py
index b618ddf5..84a0297b 100644
--- a/src/llama_stack_client/types/response_object.py
+++ b/src/llama_stack_client/types/response_object.py
@@ -28,6 +28,7 @@
"OutputOpenAIResponseOutputMessageMcpCall",
"OutputOpenAIResponseOutputMessageMcpListTools",
"OutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "OutputOpenAIResponseMcpApprovalRequest",
"Text",
"TextFormat",
"Error",
@@ -261,6 +262,18 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
"""Tool call type identifier, always "mcp_list_tools" """
+class OutputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
Output: TypeAlias = Annotated[
Union[
OutputOpenAIResponseMessage,
@@ -269,6 +282,7 @@ class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
OutputOpenAIResponseOutputMessageFunctionToolCall,
OutputOpenAIResponseOutputMessageMcpCall,
OutputOpenAIResponseOutputMessageMcpListTools,
+ OutputOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py
index 426e9263..7ec15480 100644
--- a/src/llama_stack_client/types/response_object_stream.py
+++ b/src/llama_stack_client/types/response_object_stream.py
@@ -29,6 +29,7 @@
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListToolsTool",
+ "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest",
"OpenAIResponseObjectStreamResponseOutputItemDone",
"OpenAIResponseObjectStreamResponseOutputItemDoneItem",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage",
@@ -48,6 +49,7 @@
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListToolsTool",
+ "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest",
"OpenAIResponseObjectStreamResponseOutputTextDelta",
"OpenAIResponseObjectStreamResponseOutputTextDone",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta",
@@ -330,6 +332,18 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM
"""Tool call type identifier, always "mcp_list_tools" """
+class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
OpenAIResponseObjectStreamResponseOutputItemAddedItem: TypeAlias = Annotated[
Union[
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage,
@@ -338,6 +352,7 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageMcpListTools,
+ OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
@@ -607,6 +622,18 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe
"""Tool call type identifier, always "mcp_list_tools" """
+class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
OpenAIResponseObjectStreamResponseOutputItemDoneItem: TypeAlias = Annotated[
Union[
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage,
@@ -615,6 +642,7 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageMcpListTools,
+ OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMcpApprovalRequest,
],
PropertyInfo(discriminator="type"),
]
diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/llama_stack_client/types/responses/input_item_list_response.py
index 714ff703..6862492d 100644
--- a/src/llama_stack_client/types/responses/input_item_list_response.py
+++ b/src/llama_stack_client/types/responses/input_item_list_response.py
@@ -14,6 +14,8 @@
"DataOpenAIResponseOutputMessageFileSearchToolCallResult",
"DataOpenAIResponseOutputMessageFunctionToolCall",
"DataOpenAIResponseInputFunctionToolCallOutput",
+ "DataOpenAIResponseMcpApprovalRequest",
+ "DataOpenAIResponseMcpApprovalResponse",
"DataOpenAIResponseMessage",
"DataOpenAIResponseMessageContentUnionMember1",
"DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
@@ -104,6 +106,30 @@ class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel):
status: Optional[str] = None
+class DataOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
+class DataOpenAIResponseMcpApprovalResponse(BaseModel):
+ approval_request_id: str
+
+ approve: bool
+
+ type: Literal["mcp_approval_response"]
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+
class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
text: str
"""The text content of the input message"""
@@ -223,6 +249,8 @@ class DataOpenAIResponseMessage(BaseModel):
DataOpenAIResponseOutputMessageFileSearchToolCall,
DataOpenAIResponseOutputMessageFunctionToolCall,
DataOpenAIResponseInputFunctionToolCallOutput,
+ DataOpenAIResponseMcpApprovalRequest,
+ DataOpenAIResponseMcpApprovalResponse,
DataOpenAIResponseMessage,
]
diff --git a/src/llama_stack_client/types/tool.py b/src/llama_stack_client/types/tool.py
index a7243b64..ae77cf69 100644
--- a/src/llama_stack_client/types/tool.py
+++ b/src/llama_stack_client/types/tool.py
@@ -5,30 +5,7 @@
from .._models import BaseModel
-__all__ = ["Tool", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
+__all__ = ["Tool"]
class Tool(BaseModel):
@@ -37,9 +14,6 @@ class Tool(BaseModel):
identifier: str
- parameters: List[Parameter]
- """List of parameters this tool accepts"""
-
provider_id: str
toolgroup_id: str
@@ -48,7 +22,13 @@ class Tool(BaseModel):
type: Literal["tool"]
"""Type of resource, always 'tool'"""
+ input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """JSON Schema for the tool's input parameters"""
+
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
+ output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """JSON Schema for the tool's output"""
+
provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
index 21949b41..e549c4e9 100644
--- a/src/llama_stack_client/types/tool_def.py
+++ b/src/llama_stack_client/types/tool_def.py
@@ -4,30 +4,7 @@
from .._models import BaseModel
-__all__ = ["ToolDef", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDef"]
class ToolDef(BaseModel):
@@ -37,8 +14,11 @@ class ToolDef(BaseModel):
description: Optional[str] = None
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
- parameters: Optional[List[Parameter]] = None
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
index a50437b2..9d5c71a7 100644
--- a/src/llama_stack_client/types/tool_def_param.py
+++ b/src/llama_stack_client/types/tool_def_param.py
@@ -5,30 +5,7 @@
from typing import Dict, Union, Iterable
from typing_extensions import Required, TypedDict
-__all__ = ["ToolDefParam", "Parameter"]
-
-
-class Parameter(TypedDict, total=False):
- description: Required[str]
- """Human-readable description of what the parameter does"""
-
- name: Required[str]
- """Name of the parameter"""
-
- parameter_type: Required[str]
- """Type of the parameter (e.g., string, integer)"""
-
- required: Required[bool]
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, Iterable[object], object, None]
- """(Optional) Default value for the parameter if not provided"""
-
- items: object
- """Type of the elements when parameter_type is array"""
-
- title: str
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDefParam"]
class ToolDefParam(TypedDict, total=False):
@@ -38,8 +15,11 @@ class ToolDefParam(TypedDict, total=False):
description: str
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
"""(Optional) Additional metadata about the tool"""
- parameters: Iterable[Parameter]
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
diff --git a/tests/api_resources/alpha/test_agents.py b/tests/api_resources/alpha/test_agents.py
index d67e8457..e6292f65 100644
--- a/tests/api_resources/alpha/test_agents.py
+++ b/tests/api_resources/alpha/test_agents.py
@@ -41,18 +41,9 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
}
],
"enable_session_persistence": True,
@@ -247,18 +238,9 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
}
],
"enable_session_persistence": True,
From 064b98bb38a87ee2c9deb93344409216a389aecd Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 16:49:29 +0000
Subject: [PATCH 2/9] feat(api)!: tool API changes (input_schema, etc.); remove benchmark, dataset, and telemetry query endpoints
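
Regenerates the SDK against the updated spec: besides the tool schema
changes, `client.responses.create()` now returns the new
`ResponseCreateResponse` type rather than `ResponseObject`, and the
benchmarks and datasets resources along with the telemetry query
endpoints are removed. A minimal sketch of the new return type, assuming
a local server and a placeholder model id (both illustrative):

    from llama_stack_client import LlamaStackClient
    from llama_stack_client.types import ResponseCreateResponse

    client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

    # create() is now typed as ResponseCreateResponse; retrieve() still
    # returns ResponseObject.
    response: ResponseCreateResponse = client.responses.create(
        model="llama3.2:3b",  # placeholder model id
        input="Say hello",
    )
    fetched = client.responses.retrieve(response.id)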
---
.stats.yml | 6 +-
api.md | 40 +-
src/llama_stack_client/_client.py | 76 --
src/llama_stack_client/resources/__init__.py | 28 -
.../resources/benchmarks.py | 359 --------
src/llama_stack_client/resources/datasets.py | 676 ---------------
.../resources/responses/responses.py | 300 ++-----
src/llama_stack_client/resources/telemetry.py | 784 +----------------
src/llama_stack_client/types/__init__.py | 29 +-
src/llama_stack_client/types/benchmark.py | 28 -
.../types/benchmark_list_response.py | 10 -
.../types/benchmark_register_params.py | 30 -
.../types/dataset_appendrows_params.py | 13 -
.../types/dataset_iterrows_params.py | 15 -
.../types/dataset_iterrows_response.py | 18 -
.../types/dataset_list_response.py | 66 --
.../types/dataset_register_params.py | 69 --
.../types/dataset_register_response.py | 54 --
.../types/dataset_retrieve_response.py | 54 --
.../types/list_benchmarks_response.py | 10 -
.../types/list_datasets_response.py | 11 -
.../types/query_condition_param.py | 19 -
.../types/query_spans_response.py | 11 -
.../types/response_create_params.py | 448 +---------
.../types/response_create_response.py | 640 ++++++++++++++
.../types/span_with_status.py | 35 -
.../types/telemetry_get_span_response.py | 31 -
.../types/telemetry_get_span_tree_params.py | 17 -
.../types/telemetry_get_span_tree_response.py | 10 -
.../types/telemetry_query_metrics_params.py | 36 -
.../types/telemetry_query_metrics_response.py | 45 -
.../types/telemetry_query_spans_params.py | 22 -
.../types/telemetry_query_spans_response.py | 35 -
.../types/telemetry_query_traces_params.py | 25 -
.../types/telemetry_query_traces_response.py | 10 -
.../telemetry_save_spans_to_dataset_params.py | 25 -
src/llama_stack_client/types/tool.py | 34 +-
src/llama_stack_client/types/tool_def.py | 32 +-
.../types/tool_def_param.py | 32 +-
src/llama_stack_client/types/trace.py | 22 -
tests/api_resources/alpha/test_agents.py | 26 +-
tests/api_resources/test_benchmarks.py | 248 ------
tests/api_resources/test_datasets.py | 521 ------------
tests/api_resources/test_responses.py | 233 ++----
tests/api_resources/test_telemetry.py | 787 ------------------
45 files changed, 886 insertions(+), 5134 deletions(-)
delete mode 100644 src/llama_stack_client/resources/benchmarks.py
delete mode 100644 src/llama_stack_client/resources/datasets.py
delete mode 100644 src/llama_stack_client/types/benchmark.py
delete mode 100644 src/llama_stack_client/types/benchmark_list_response.py
delete mode 100644 src/llama_stack_client/types/benchmark_register_params.py
delete mode 100644 src/llama_stack_client/types/dataset_appendrows_params.py
delete mode 100644 src/llama_stack_client/types/dataset_iterrows_params.py
delete mode 100644 src/llama_stack_client/types/dataset_iterrows_response.py
delete mode 100644 src/llama_stack_client/types/dataset_list_response.py
delete mode 100644 src/llama_stack_client/types/dataset_register_params.py
delete mode 100644 src/llama_stack_client/types/dataset_register_response.py
delete mode 100644 src/llama_stack_client/types/dataset_retrieve_response.py
delete mode 100644 src/llama_stack_client/types/list_benchmarks_response.py
delete mode 100644 src/llama_stack_client/types/list_datasets_response.py
delete mode 100644 src/llama_stack_client/types/query_condition_param.py
delete mode 100644 src/llama_stack_client/types/query_spans_response.py
create mode 100644 src/llama_stack_client/types/response_create_response.py
delete mode 100644 src/llama_stack_client/types/span_with_status.py
delete mode 100644 src/llama_stack_client/types/telemetry_get_span_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_get_span_tree_params.py
delete mode 100644 src/llama_stack_client/types/telemetry_get_span_tree_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_metrics_params.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_metrics_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_spans_params.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_spans_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_traces_params.py
delete mode 100644 src/llama_stack_client/types/telemetry_query_traces_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
delete mode 100644 src/llama_stack_client/types/trace.py
delete mode 100644 tests/api_resources/test_benchmarks.py
delete mode 100644 tests/api_resources/test_datasets.py
diff --git a/.stats.yml b/.stats.yml
index d9b62ff0..cbd436bf 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5f0f0b99d1b0bf40e00e11f5d134ed13de97799cf2dfea0c8612e2f003584505.yml
-openapi_spec_hash: 5f51544cb340c37aba54b93a526c536e
+configured_endpoints: 93
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-41cb5d8049e6ffd933a7ad6bbbb76b2fef2e864d0d857c91799ee16e9a796883.yml
+openapi_spec_hash: 5e0bdf64563e020ef14b968ab724d2db
config_hash: 0412cd40c0609550c1a47c69dd104e4f
diff --git a/api.md b/api.md
index 7efc4fae..cdb427e2 100644
--- a/api.md
+++ b/api.md
@@ -78,6 +78,7 @@ Types:
from llama_stack_client.types import (
ResponseObject,
ResponseObjectStream,
+ ResponseCreateResponse,
ResponseListResponse,
ResponseDeleteResponse,
)
@@ -85,7 +86,7 @@ from llama_stack_client.types import (
Methods:
-- client.responses.create(\*\*params) -> ResponseObject
+- client.responses.create(\*\*params) -> ResponseCreateResponse
- client.responses.retrieve(response_id) -> ResponseObject
- client.responses.list(\*\*params) -> SyncOpenAICursorPage[ResponseListResponse]
- client.responses.delete(response_id) -> ResponseDeleteResponse
@@ -107,24 +108,9 @@ Methods:
Types:
```python
-from llama_stack_client.types import (
- ListDatasetsResponse,
- DatasetRetrieveResponse,
- DatasetListResponse,
- DatasetIterrowsResponse,
- DatasetRegisterResponse,
-)
+from llama_stack_client.types import ListDatasetsResponse
```
-Methods:
-
-- client.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
-- client.datasets.list() -> DatasetListResponse
-- client.datasets.appendrows(dataset_id, \*\*params) -> None
-- client.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
-- client.datasets.register(\*\*params) -> DatasetRegisterResponse
-- client.datasets.unregister(dataset_id) -> None
-
# Inspect
Types:
@@ -396,24 +382,12 @@ from llama_stack_client.types import (
QuerySpansResponse,
SpanWithStatus,
Trace,
- TelemetryGetSpanResponse,
- TelemetryGetSpanTreeResponse,
- TelemetryQueryMetricsResponse,
- TelemetryQuerySpansResponse,
- TelemetryQueryTracesResponse,
)
```
Methods:
-- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse
-- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
-- client.telemetry.get_trace(trace_id) -> Trace
- client.telemetry.log_event(\*\*params) -> None
-- client.telemetry.query_metrics(metric_name, \*\*params) -> TelemetryQueryMetricsResponse
-- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
-- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
-- client.telemetry.save_spans_to_dataset(\*\*params) -> None
# Scoring
@@ -452,15 +426,9 @@ Methods:
Types:
```python
-from llama_stack_client.types import Benchmark, ListBenchmarksResponse, BenchmarkListResponse
+from llama_stack_client.types import Benchmark, ListBenchmarksResponse
```
-Methods:
-
-- client.benchmarks.retrieve(benchmark_id) -> Benchmark
-- client.benchmarks.list() -> BenchmarkListResponse
-- client.benchmarks.register(\*\*params) -> None
-
# Files
Types:
diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index 6b8f11b2..6eebb18f 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -43,12 +43,10 @@
inspect,
scoring,
shields,
- datasets,
providers,
responses,
telemetry,
vector_io,
- benchmarks,
embeddings,
toolgroups,
vector_dbs,
@@ -66,12 +64,10 @@
from .resources.inspect import InspectResource, AsyncInspectResource
from .resources.scoring import ScoringResource, AsyncScoringResource
from .resources.shields import ShieldsResource, AsyncShieldsResource
- from .resources.datasets import DatasetsResource, AsyncDatasetsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.providers import ProvidersResource, AsyncProvidersResource
from .resources.telemetry import TelemetryResource, AsyncTelemetryResource
from .resources.vector_io import VectorIoResource, AsyncVectorIoResource
- from .resources.benchmarks import BenchmarksResource, AsyncBenchmarksResource
from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource
from .resources.toolgroups import ToolgroupsResource, AsyncToolgroupsResource
from .resources.vector_dbs import VectorDBsResource, AsyncVectorDBsResource
@@ -181,12 +177,6 @@ def responses(self) -> ResponsesResource:
return ResponsesResource(self)
- @cached_property
- def datasets(self) -> DatasetsResource:
- from .resources.datasets import DatasetsResource
-
- return DatasetsResource(self)
-
@cached_property
def inspect(self) -> InspectResource:
from .resources.inspect import InspectResource
@@ -289,12 +279,6 @@ def scoring_functions(self) -> ScoringFunctionsResource:
return ScoringFunctionsResource(self)
- @cached_property
- def benchmarks(self) -> BenchmarksResource:
- from .resources.benchmarks import BenchmarksResource
-
- return BenchmarksResource(self)
-
@cached_property
def files(self) -> FilesResource:
from .resources.files import FilesResource
@@ -503,12 +487,6 @@ def responses(self) -> AsyncResponsesResource:
return AsyncResponsesResource(self)
- @cached_property
- def datasets(self) -> AsyncDatasetsResource:
- from .resources.datasets import AsyncDatasetsResource
-
- return AsyncDatasetsResource(self)
-
@cached_property
def inspect(self) -> AsyncInspectResource:
from .resources.inspect import AsyncInspectResource
@@ -611,12 +589,6 @@ def scoring_functions(self) -> AsyncScoringFunctionsResource:
return AsyncScoringFunctionsResource(self)
- @cached_property
- def benchmarks(self) -> AsyncBenchmarksResource:
- from .resources.benchmarks import AsyncBenchmarksResource
-
- return AsyncBenchmarksResource(self)
-
@cached_property
def files(self) -> AsyncFilesResource:
from .resources.files import AsyncFilesResource
@@ -774,12 +746,6 @@ def responses(self) -> responses.ResponsesResourceWithRawResponse:
return ResponsesResourceWithRawResponse(self._client.responses)
- @cached_property
- def datasets(self) -> datasets.DatasetsResourceWithRawResponse:
- from .resources.datasets import DatasetsResourceWithRawResponse
-
- return DatasetsResourceWithRawResponse(self._client.datasets)
-
@cached_property
def inspect(self) -> inspect.InspectResourceWithRawResponse:
from .resources.inspect import InspectResourceWithRawResponse
@@ -882,12 +848,6 @@ def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithRaw
return ScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
- @cached_property
- def benchmarks(self) -> benchmarks.BenchmarksResourceWithRawResponse:
- from .resources.benchmarks import BenchmarksResourceWithRawResponse
-
- return BenchmarksResourceWithRawResponse(self._client.benchmarks)
-
@cached_property
def files(self) -> files.FilesResourceWithRawResponse:
from .resources.files import FilesResourceWithRawResponse
@@ -931,12 +891,6 @@ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
return AsyncResponsesResourceWithRawResponse(self._client.responses)
- @cached_property
- def datasets(self) -> datasets.AsyncDatasetsResourceWithRawResponse:
- from .resources.datasets import AsyncDatasetsResourceWithRawResponse
-
- return AsyncDatasetsResourceWithRawResponse(self._client.datasets)
-
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithRawResponse:
from .resources.inspect import AsyncInspectResourceWithRawResponse
@@ -1041,12 +995,6 @@ def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWi
return AsyncScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
- @cached_property
- def benchmarks(self) -> benchmarks.AsyncBenchmarksResourceWithRawResponse:
- from .resources.benchmarks import AsyncBenchmarksResourceWithRawResponse
-
- return AsyncBenchmarksResourceWithRawResponse(self._client.benchmarks)
-
@cached_property
def files(self) -> files.AsyncFilesResourceWithRawResponse:
from .resources.files import AsyncFilesResourceWithRawResponse
@@ -1090,12 +1038,6 @@ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
return ResponsesResourceWithStreamingResponse(self._client.responses)
- @cached_property
- def datasets(self) -> datasets.DatasetsResourceWithStreamingResponse:
- from .resources.datasets import DatasetsResourceWithStreamingResponse
-
- return DatasetsResourceWithStreamingResponse(self._client.datasets)
-
@cached_property
def inspect(self) -> inspect.InspectResourceWithStreamingResponse:
from .resources.inspect import InspectResourceWithStreamingResponse
@@ -1200,12 +1142,6 @@ def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithStr
return ScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
- @cached_property
- def benchmarks(self) -> benchmarks.BenchmarksResourceWithStreamingResponse:
- from .resources.benchmarks import BenchmarksResourceWithStreamingResponse
-
- return BenchmarksResourceWithStreamingResponse(self._client.benchmarks)
-
@cached_property
def files(self) -> files.FilesResourceWithStreamingResponse:
from .resources.files import FilesResourceWithStreamingResponse
@@ -1249,12 +1185,6 @@ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
- @cached_property
- def datasets(self) -> datasets.AsyncDatasetsResourceWithStreamingResponse:
- from .resources.datasets import AsyncDatasetsResourceWithStreamingResponse
-
- return AsyncDatasetsResourceWithStreamingResponse(self._client.datasets)
-
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithStreamingResponse:
from .resources.inspect import AsyncInspectResourceWithStreamingResponse
@@ -1359,12 +1289,6 @@ def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWi
return AsyncScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
- @cached_property
- def benchmarks(self) -> benchmarks.AsyncBenchmarksResourceWithStreamingResponse:
- from .resources.benchmarks import AsyncBenchmarksResourceWithStreamingResponse
-
- return AsyncBenchmarksResourceWithStreamingResponse(self._client.benchmarks)
-
@cached_property
def files(self) -> files.AsyncFilesResourceWithStreamingResponse:
from .resources.files import AsyncFilesResourceWithStreamingResponse
diff --git a/src/llama_stack_client/resources/__init__.py b/src/llama_stack_client/resources/__init__.py
index 3089ae21..27e4b3c0 100644
--- a/src/llama_stack_client/resources/__init__.py
+++ b/src/llama_stack_client/resources/__init__.py
@@ -80,14 +80,6 @@
ShieldsResourceWithStreamingResponse,
AsyncShieldsResourceWithStreamingResponse,
)
-from .datasets import (
- DatasetsResource,
- AsyncDatasetsResource,
- DatasetsResourceWithRawResponse,
- AsyncDatasetsResourceWithRawResponse,
- DatasetsResourceWithStreamingResponse,
- AsyncDatasetsResourceWithStreamingResponse,
-)
from .providers import (
ProvidersResource,
AsyncProvidersResource,
@@ -120,14 +112,6 @@
VectorIoResourceWithStreamingResponse,
AsyncVectorIoResourceWithStreamingResponse,
)
-from .benchmarks import (
- BenchmarksResource,
- AsyncBenchmarksResource,
- BenchmarksResourceWithRawResponse,
- AsyncBenchmarksResourceWithRawResponse,
- BenchmarksResourceWithStreamingResponse,
- AsyncBenchmarksResourceWithStreamingResponse,
-)
from .embeddings import (
EmbeddingsResource,
AsyncEmbeddingsResource,
@@ -226,12 +210,6 @@
"AsyncResponsesResourceWithRawResponse",
"ResponsesResourceWithStreamingResponse",
"AsyncResponsesResourceWithStreamingResponse",
- "DatasetsResource",
- "AsyncDatasetsResource",
- "DatasetsResourceWithRawResponse",
- "AsyncDatasetsResourceWithRawResponse",
- "DatasetsResourceWithStreamingResponse",
- "AsyncDatasetsResourceWithStreamingResponse",
"InspectResource",
"AsyncInspectResource",
"InspectResourceWithRawResponse",
@@ -334,12 +312,6 @@
"AsyncScoringFunctionsResourceWithRawResponse",
"ScoringFunctionsResourceWithStreamingResponse",
"AsyncScoringFunctionsResourceWithStreamingResponse",
- "BenchmarksResource",
- "AsyncBenchmarksResource",
- "BenchmarksResourceWithRawResponse",
- "AsyncBenchmarksResourceWithRawResponse",
- "BenchmarksResourceWithStreamingResponse",
- "AsyncBenchmarksResourceWithStreamingResponse",
"FilesResource",
"AsyncFilesResource",
"FilesResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/benchmarks.py b/src/llama_stack_client/resources/benchmarks.py
deleted file mode 100644
index 92b8a0c1..00000000
--- a/src/llama_stack_client/resources/benchmarks.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Type, Union, Iterable, cast
-
-import httpx
-
-from ..types import benchmark_register_params
-from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.benchmark import Benchmark
-from ..types.benchmark_list_response import BenchmarkListResponse
-
-__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
-
-
-class BenchmarksResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> BenchmarksResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return BenchmarksResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return BenchmarksResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Benchmark:
- """
- Get a benchmark by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._get(
- f"/v1/eval/benchmarks/{benchmark_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Benchmark,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkListResponse:
- """List all benchmarks."""
- return self._get(
- "/v1/eval/benchmarks",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
- ),
- cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
- )
-
- def register(
- self,
- *,
- benchmark_id: str,
- dataset_id: str,
- scoring_functions: SequenceNotStr[str],
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
- provider_benchmark_id: str | Omit = omit,
- provider_id: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to register.
-
- dataset_id: The ID of the dataset to use for the benchmark.
-
- scoring_functions: The scoring functions to use for the benchmark.
-
- metadata: The metadata to use for the benchmark.
-
- provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
-
- provider_id: The ID of the provider to use for the benchmark.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1/eval/benchmarks",
- body=maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "metadata": metadata,
- "provider_benchmark_id": provider_benchmark_id,
- "provider_id": provider_id,
- },
- benchmark_register_params.BenchmarkRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncBenchmarksResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncBenchmarksResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncBenchmarksResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Benchmark:
- """
- Get a benchmark by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._get(
- f"/v1/eval/benchmarks/{benchmark_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Benchmark,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkListResponse:
- """List all benchmarks."""
- return await self._get(
- "/v1/eval/benchmarks",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
- ),
- cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
- )
-
- async def register(
- self,
- *,
- benchmark_id: str,
- dataset_id: str,
- scoring_functions: SequenceNotStr[str],
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
- provider_benchmark_id: str | Omit = omit,
- provider_id: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to register.
-
- dataset_id: The ID of the dataset to use for the benchmark.
-
- scoring_functions: The scoring functions to use for the benchmark.
-
- metadata: The metadata to use for the benchmark.
-
- provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
-
- provider_id: The ID of the provider to use for the benchmark.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1/eval/benchmarks",
- body=await async_maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "metadata": metadata,
- "provider_benchmark_id": provider_benchmark_id,
- "provider_id": provider_id,
- },
- benchmark_register_params.BenchmarkRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class BenchmarksResourceWithRawResponse:
- def __init__(self, benchmarks: BenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = to_raw_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = to_raw_response_wrapper(
- benchmarks.list,
- )
- self.register = to_raw_response_wrapper(
- benchmarks.register,
- )
-
-
-class AsyncBenchmarksResourceWithRawResponse:
- def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = async_to_raw_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- benchmarks.list,
- )
- self.register = async_to_raw_response_wrapper(
- benchmarks.register,
- )
-
-
-class BenchmarksResourceWithStreamingResponse:
- def __init__(self, benchmarks: BenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = to_streamed_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- benchmarks.list,
- )
- self.register = to_streamed_response_wrapper(
- benchmarks.register,
- )
-
-
-class AsyncBenchmarksResourceWithStreamingResponse:
- def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = async_to_streamed_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- benchmarks.list,
- )
- self.register = async_to_streamed_response_wrapper(
- benchmarks.register,
- )
diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py
deleted file mode 100644
index e3a2af6d..00000000
--- a/src/llama_stack_client/resources/datasets.py
+++ /dev/null
@@ -1,676 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Type, Union, Iterable, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import dataset_iterrows_params, dataset_register_params, dataset_appendrows_params
-from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.dataset_list_response import DatasetListResponse
-from ..types.dataset_iterrows_response import DatasetIterrowsResponse
-from ..types.dataset_register_response import DatasetRegisterResponse
-from ..types.dataset_retrieve_response import DatasetRetrieveResponse
-
-__all__ = ["DatasetsResource", "AsyncDatasetsResource"]
-
-
-class DatasetsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> DatasetsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return DatasetsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return DatasetsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRetrieveResponse:
- """
- Get a dataset by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return self._get(
- f"/v1/datasets/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRetrieveResponse,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetListResponse:
- """List all datasets."""
- return self._get(
- "/v1/datasets",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[DatasetListResponse]._unwrapper,
- ),
- cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
- )
-
- def appendrows(
- self,
- dataset_id: str,
- *,
- rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Append rows to a dataset.
-
- Args:
- rows: The rows to append to the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- f"/v1/datasetio/append-rows/{dataset_id}",
- body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def iterrows(
- self,
- dataset_id: str,
- *,
- limit: int | Omit = omit,
- start_index: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetIterrowsResponse:
- """Get a paginated list of rows from a dataset.
-
- Uses offset-based pagination where:
-
- - start_index: The starting index (0-based). If None, starts from beginning.
- - limit: Number of items to return. If None or -1, returns all items.
-
- The response includes:
-
- - data: List of items for the current page.
- - has_more: Whether there are more items available after this set.
-
- Args:
- limit: The number of rows to get.
-
- start_index: Index into dataset for the first row to get. Get all rows if None.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return self._get(
- f"/v1/datasetio/iterrows/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "limit": limit,
- "start_index": start_index,
- },
- dataset_iterrows_params.DatasetIterrowsParams,
- ),
- ),
- cast_to=DatasetIterrowsResponse,
- )
-
- def register(
- self,
- *,
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
- source: dataset_register_params.Source,
- dataset_id: str | Omit = omit,
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRegisterResponse:
- """Register a new dataset.
-
- Args:
- purpose: The purpose of the dataset.
-
-              One of:
-
-              - "post-training/messages": The dataset contains a messages column with a
-                list of messages for post-training, e.g. { "messages": [ {"role": "user",
-                "content": "Hello, world!"}, {"role": "assistant", "content": "Hello,
-                world!"} ] }
-              - "eval/question-answer": The dataset contains a question column and an
-                answer column for evaluation, e.g. { "question": "What is the capital of
-                France?", "answer": "Paris" }
-              - "eval/messages-answer": The dataset contains a messages column with a
-                list of messages and an answer column for evaluation, e.g. { "messages":
-                [ {"role": "user", "content": "Hello, my name is John Doe."}, {"role":
-                "assistant", "content": "Hello, John Doe. How can I help you today?"},
-                {"role": "user", "content": "What's my name?"} ], "answer": "John Doe" }
-
-          source: The data source of the dataset. Ensure that the data source schema is
-              compatible with the purpose of the dataset. Examples:
-
-              - { "type": "uri", "uri": "https://mywebsite.com/mydata.jsonl" }
-              - { "type": "uri", "uri": "lsfs://mydata.jsonl" }
-              - { "type": "uri", "uri": "data:csv;base64,{base64_content}" }
-              - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" }
-              - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content":
-                "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"} ] } ] }
-
- dataset_id: The ID of the dataset. If not provided, an ID will be generated.
-
-          metadata: The metadata for the dataset, e.g. {"description": "My dataset"}.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/datasets",
- body=maybe_transform(
- {
- "purpose": purpose,
- "source": source,
- "dataset_id": dataset_id,
- "metadata": metadata,
- },
- dataset_register_params.DatasetRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRegisterResponse,
- )
-
- def unregister(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a dataset by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- f"/v1/datasets/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncDatasetsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncDatasetsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncDatasetsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncDatasetsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRetrieveResponse:
- """
- Get a dataset by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return await self._get(
- f"/v1/datasets/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRetrieveResponse,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetListResponse:
- """List all datasets."""
- return await self._get(
- "/v1/datasets",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[DatasetListResponse]._unwrapper,
- ),
- cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
- )
-
- async def appendrows(
- self,
- dataset_id: str,
- *,
- rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Append rows to a dataset.
-
- Args:
- rows: The rows to append to the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- f"/v1/datasetio/append-rows/{dataset_id}",
- body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def iterrows(
- self,
- dataset_id: str,
- *,
- limit: int | Omit = omit,
- start_index: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetIterrowsResponse:
- """Get a paginated list of rows from a dataset.
-
- Uses offset-based pagination where:
-
- - start_index: The starting index (0-based). If None, starts from beginning.
- - limit: Number of items to return. If None or -1, returns all items.
-
- The response includes:
-
- - data: List of items for the current page.
- - has_more: Whether there are more items available after this set.
-
- Args:
- limit: The number of rows to get.
-
- start_index: Index into dataset for the first row to get. Get all rows if None.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return await self._get(
- f"/v1/datasetio/iterrows/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "limit": limit,
- "start_index": start_index,
- },
- dataset_iterrows_params.DatasetIterrowsParams,
- ),
- ),
- cast_to=DatasetIterrowsResponse,
- )
-
- async def register(
- self,
- *,
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
- source: dataset_register_params.Source,
- dataset_id: str | Omit = omit,
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRegisterResponse:
- """Register a new dataset.
-
- Args:
- purpose: The purpose of the dataset.
-
-              One of:
-
-              - "post-training/messages": The dataset contains a messages column with a
-                list of messages for post-training, e.g. { "messages": [ {"role": "user",
-                "content": "Hello, world!"}, {"role": "assistant", "content": "Hello,
-                world!"} ] }
-              - "eval/question-answer": The dataset contains a question column and an
-                answer column for evaluation, e.g. { "question": "What is the capital of
-                France?", "answer": "Paris" }
-              - "eval/messages-answer": The dataset contains a messages column with a
-                list of messages and an answer column for evaluation, e.g. { "messages":
-                [ {"role": "user", "content": "Hello, my name is John Doe."}, {"role":
-                "assistant", "content": "Hello, John Doe. How can I help you today?"},
-                {"role": "user", "content": "What's my name?"} ], "answer": "John Doe" }
-
-          source: The data source of the dataset. Ensure that the data source schema is
-              compatible with the purpose of the dataset. Examples:
-
-              - { "type": "uri", "uri": "https://mywebsite.com/mydata.jsonl" }
-              - { "type": "uri", "uri": "lsfs://mydata.jsonl" }
-              - { "type": "uri", "uri": "data:csv;base64,{base64_content}" }
-              - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" }
-              - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content":
-                "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"} ] } ] }
-
- dataset_id: The ID of the dataset. If not provided, an ID will be generated.
-
-          metadata: The metadata for the dataset, e.g. {"description": "My dataset"}.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/datasets",
- body=await async_maybe_transform(
- {
- "purpose": purpose,
- "source": source,
- "dataset_id": dataset_id,
- "metadata": metadata,
- },
- dataset_register_params.DatasetRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRegisterResponse,
- )
-
- async def unregister(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a dataset by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- f"/v1/datasets/{dataset_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class DatasetsResourceWithRawResponse:
- def __init__(self, datasets: DatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = to_raw_response_wrapper(
- datasets.retrieve,
- )
- self.list = to_raw_response_wrapper(
- datasets.list,
- )
- self.appendrows = to_raw_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = to_raw_response_wrapper(
- datasets.iterrows,
- )
- self.register = to_raw_response_wrapper(
- datasets.register,
- )
- self.unregister = to_raw_response_wrapper(
- datasets.unregister,
- )
-
-
-class AsyncDatasetsResourceWithRawResponse:
- def __init__(self, datasets: AsyncDatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = async_to_raw_response_wrapper(
- datasets.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- datasets.list,
- )
- self.appendrows = async_to_raw_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = async_to_raw_response_wrapper(
- datasets.iterrows,
- )
- self.register = async_to_raw_response_wrapper(
- datasets.register,
- )
- self.unregister = async_to_raw_response_wrapper(
- datasets.unregister,
- )
-
-
-class DatasetsResourceWithStreamingResponse:
- def __init__(self, datasets: DatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = to_streamed_response_wrapper(
- datasets.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- datasets.list,
- )
- self.appendrows = to_streamed_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = to_streamed_response_wrapper(
- datasets.iterrows,
- )
- self.register = to_streamed_response_wrapper(
- datasets.register,
- )
- self.unregister = to_streamed_response_wrapper(
- datasets.unregister,
- )
-
-
-class AsyncDatasetsResourceWithStreamingResponse:
- def __init__(self, datasets: AsyncDatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = async_to_streamed_response_wrapper(
- datasets.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- datasets.list,
- )
- self.appendrows = async_to_streamed_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = async_to_streamed_response_wrapper(
- datasets.iterrows,
- )
- self.register = async_to_streamed_response_wrapper(
- datasets.register,
- )
- self.unregister = async_to_streamed_response_wrapper(
- datasets.unregister,
- )
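
For reference, the DatasetsResource deleted above was driven like this — a minimal
sketch grounded in the removed signatures, assuming a locally running stack (the
base URL and dataset ID are illustrative):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative URL

    # Register a dataset whose source schema matches its declared purpose.
    client.datasets.register(
        purpose="eval/question-answer",
        source={"type": "uri", "uri": "https://mywebsite.com/mydata.jsonl"},
        dataset_id="my-dataset",  # optional; an ID is generated when omitted
        metadata={"description": "My dataset"},
    )

    # Offset-based pagination: start_index is 0-based; limit=None or -1 returns all rows.
    start = 0
    while True:
        page = client.datasets.iterrows("my-dataset", start_index=start, limit=100)
        for row in page.data:
            ...  # process one row dict
        if not page.has_more:
            break
        start += len(page.data)
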
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 16e38fd0..418001eb 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -2,14 +2,13 @@
from __future__ import annotations
-from typing import Union, Iterable
from typing_extensions import Literal, overload
import httpx
from ...types import response_list_params, response_create_params
-from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -32,6 +31,7 @@
from ...types.response_object import ResponseObject
from ...types.response_list_response import ResponseListResponse
from ...types.response_object_stream import ResponseObjectStream
+from ...types.response_create_response import ResponseCreateResponse
from ...types.response_delete_response import ResponseDeleteResponse
__all__ = ["ResponsesResource", "AsyncResponsesResource"]
@@ -65,87 +65,28 @@ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- stream: Literal[False] | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject:
- """
- Create a new OpenAI response.
-
- Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
-
- include: (Optional) Additional fields to include in the response.
-
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-              response. This can be used to easily fork off new responses from existing
- responses.
-
- text: Text response configuration for OpenAI responses.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- stream: Literal[True],
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Stream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse:
"""
- Create a new OpenAI response.
+ List all OpenAI responses.
Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
+ after: The ID of the last response to return.
- include: (Optional) Additional fields to include in the response.
+ limit: The number of responses to return.
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-            response. This can be used to easily fork off new responses from existing
- responses.
+ model: The model to filter responses by.
- text: Text response configuration for OpenAI responses.
+            order: The sort order for responses by created_at ('asc' or 'desc').
extra_headers: Send extra headers
@@ -161,39 +102,28 @@ def create(
def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- stream: bool,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject | Stream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse:
"""
- Create a new OpenAI response.
+ List all OpenAI responses.
Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
+ after: The ID of the last response to return.
- include: (Optional) Additional fields to include in the response.
+ limit: The number of responses to return.
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-            response. This can be used to easily fork off new responses from existing
- responses.
+ model: The model to filter responses by.
- text: Text response configuration for OpenAI responses.
+            order: The sort order for responses by created_at ('asc' or 'desc').
extra_headers: Send extra headers
@@ -205,52 +135,35 @@ def create(
"""
...
- @required_args(["input", "model"], ["input", "model", "stream"])
def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- stream: Literal[False] | Literal[True] | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject | Stream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse | Stream[ResponseObjectStream]:
return self._post(
"/v1/responses",
body=maybe_transform(
{
- "input": input,
+ "after": after,
+ "limit": limit,
"model": model,
- "include": include,
- "instructions": instructions,
- "max_infer_iters": max_infer_iters,
- "previous_response_id": previous_response_id,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tools": tools,
+ "order": order,
},
- response_create_params.ResponseCreateParamsStreaming
- if stream
- else response_create_params.ResponseCreateParamsNonStreaming,
+ response_create_params.ResponseCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ResponseObject,
+ cast_to=ResponseCreateResponse,
stream=stream or False,
stream_cls=Stream[ResponseObjectStream],
)
@@ -405,87 +318,28 @@ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse
async def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- stream: Literal[False] | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject:
- """
- Create a new OpenAI response.
-
- Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
-
- include: (Optional) Additional fields to include in the response.
-
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-          response. This can be used to easily fork off new responses from existing
- responses.
-
- text: Text response configuration for OpenAI responses.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- async def create(
- self,
- *,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- stream: Literal[True],
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> AsyncStream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse:
"""
- Create a new OpenAI response.
+ List all OpenAI responses.
Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
+ after: The ID of the last response to return.
- include: (Optional) Additional fields to include in the response.
+ limit: The number of responses to return.
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-            response. This can be used to easily fork off new responses from existing
- responses.
+ model: The model to filter responses by.
- text: Text response configuration for OpenAI responses.
+            order: The sort order for responses by created_at ('asc' or 'desc').
extra_headers: Send extra headers
@@ -501,39 +355,28 @@ async def create(
async def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- stream: bool,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject | AsyncStream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse:
"""
- Create a new OpenAI response.
+ List all OpenAI responses.
Args:
- input: Input message(s) to create the response.
-
- model: The underlying LLM used for completions.
+ after: The ID of the last response to return.
- include: (Optional) Additional fields to include in the response.
+ limit: The number of responses to return.
- previous_response_id: (Optional) if specified, the new response will be a continuation of the previous
-            response. This can be used to easily fork off new responses from existing
- responses.
+ model: The model to filter responses by.
- text: Text response configuration for OpenAI responses.
+            order: The sort order for responses by created_at ('asc' or 'desc').
extra_headers: Send extra headers
@@ -545,52 +388,35 @@ async def create(
"""
...
- @required_args(["input", "model"], ["input", "model", "stream"])
async def create(
self,
*,
- input: Union[str, Iterable[response_create_params.InputUnionMember1]],
- model: str,
- include: SequenceNotStr[str] | Omit = omit,
- instructions: str | Omit = omit,
- max_infer_iters: int | Omit = omit,
- previous_response_id: str | Omit = omit,
- store: bool | Omit = omit,
- stream: Literal[False] | Literal[True] | Omit = omit,
- temperature: float | Omit = omit,
- text: response_create_params.Text | Omit = omit,
- tools: Iterable[response_create_params.Tool] | Omit = omit,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ model: str | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseObject | AsyncStream[ResponseObjectStream]:
+ ) -> ResponseCreateResponse | AsyncStream[ResponseObjectStream]:
return await self._post(
"/v1/responses",
body=await async_maybe_transform(
{
- "input": input,
+ "after": after,
+ "limit": limit,
"model": model,
- "include": include,
- "instructions": instructions,
- "max_infer_iters": max_infer_iters,
- "previous_response_id": previous_response_id,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tools": tools,
+ "order": order,
},
- response_create_params.ResponseCreateParamsStreaming
- if stream
- else response_create_params.ResponseCreateParamsNonStreaming,
+ response_create_params.ResponseCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ResponseObject,
+ cast_to=ResponseCreateResponse,
stream=stream or False,
stream_cls=AsyncStream[ResponseObjectStream],
)
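
The overloads deleted above selected streaming behavior through the `stream` flag:
`stream=True` returned `Stream[ResponseObjectStream]` (or `AsyncStream` on the async
client), anything else a single `ResponseObject`. A minimal sketch of that pre-change
call shape, assuming a configured client (the model ID is illustrative):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative URL

    # Non-streaming: one ResponseObject comes back after generation finishes.
    resp = client.responses.create(
        input="Hello, world!",
        model="meta-llama/Llama-3.2-3B-Instruct",  # illustrative model ID
    )

    # Streaming: stream=True picks the overload that yields ResponseObjectStream
    # events incrementally instead of a single final object.
    for event in client.responses.create(
        input="Hello, world!",
        model="meta-llama/Llama-3.2-3B-Instruct",
        stream=True,
    ):
        ...  # handle each streamed event
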
diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py
index daafbb50..7210c834 100644
--- a/src/llama_stack_client/resources/telemetry.py
+++ b/src/llama_stack_client/resources/telemetry.py
@@ -2,20 +2,10 @@
from __future__ import annotations
-from typing import Type, Iterable, cast
-from typing_extensions import Literal
-
import httpx
-from ..types import (
- telemetry_log_event_params,
- telemetry_query_spans_params,
- telemetry_query_traces_params,
- telemetry_get_span_tree_params,
- telemetry_query_metrics_params,
- telemetry_save_spans_to_dataset_params,
-)
-from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ..types import telemetry_log_event_params
+from .._types import Body, Query, Headers, NoneType, NotGiven, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -25,16 +15,8 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from .._wrappers import DataWrapper
-from ..types.trace import Trace
from .._base_client import make_request_options
from ..types.event_param import EventParam
-from ..types.query_condition_param import QueryConditionParam
-from ..types.telemetry_get_span_response import TelemetryGetSpanResponse
-from ..types.telemetry_query_spans_response import TelemetryQuerySpansResponse
-from ..types.telemetry_query_traces_response import TelemetryQueryTracesResponse
-from ..types.telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse
-from ..types.telemetry_query_metrics_response import TelemetryQueryMetricsResponse
__all__ = ["TelemetryResource", "AsyncTelemetryResource"]
@@ -59,125 +41,6 @@ def with_streaming_response(self) -> TelemetryResourceWithStreamingResponse:
"""
return TelemetryResourceWithStreamingResponse(self)
- def get_span(
- self,
- span_id: str,
- *,
- trace_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryGetSpanResponse:
- """
- Get a span by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not trace_id:
- raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
- if not span_id:
- raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
- return self._get(
- f"/v1/telemetry/traces/{trace_id}/spans/{span_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TelemetryGetSpanResponse,
- )
-
- def get_span_tree(
- self,
- span_id: str,
- *,
- attributes_to_return: SequenceNotStr[str] | Omit = omit,
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryGetSpanTreeResponse:
- """
- Get a span tree by its ID.
-
- Args:
- attributes_to_return: The attributes to return in the tree.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not span_id:
- raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
- return self._post(
- f"/v1/telemetry/spans/{span_id}/tree",
- body=maybe_transform(
- {
- "attributes_to_return": attributes_to_return,
- "max_depth": max_depth,
- },
- telemetry_get_span_tree_params.TelemetryGetSpanTreeParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]),
- )
-
- def get_trace(
- self,
- trace_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Trace:
- """
- Get a trace by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not trace_id:
- raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
- return self._get(
- f"/v1/telemetry/traces/{trace_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Trace,
- )
-
def log_event(
self,
*,
@@ -222,226 +85,6 @@ def log_event(
cast_to=NoneType,
)
- def query_metrics(
- self,
- metric_name: str,
- *,
- query_type: Literal["range", "instant"],
- start_time: int,
- end_time: int | Omit = omit,
- granularity: str | Omit = omit,
- label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQueryMetricsResponse:
- """
- Query metrics.
-
- Args:
- query_type: The type of query to perform.
-
- start_time: The start time of the metric to query.
-
- end_time: The end time of the metric to query.
-
- granularity: The granularity of the metric to query.
-
- label_matchers: The label matchers to apply to the metric.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not metric_name:
- raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
- return self._post(
- f"/v1/telemetry/metrics/{metric_name}",
- body=maybe_transform(
- {
- "query_type": query_type,
- "start_time": start_time,
- "end_time": end_time,
- "granularity": granularity,
- "label_matchers": label_matchers,
- },
- telemetry_query_metrics_params.TelemetryQueryMetricsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
- )
-
- def query_spans(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam],
- attributes_to_return: SequenceNotStr[str],
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQuerySpansResponse:
- """
- Query spans.
-
- Args:
- attribute_filters: The attribute filters to apply to the spans.
-
- attributes_to_return: The attributes to return in the spans.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/telemetry/spans",
- body=maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "attributes_to_return": attributes_to_return,
- "max_depth": max_depth,
- },
- telemetry_query_spans_params.TelemetryQuerySpansParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]),
- )
-
- def query_traces(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam] | Omit = omit,
- limit: int | Omit = omit,
- offset: int | Omit = omit,
- order_by: SequenceNotStr[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQueryTracesResponse:
- """
- Query traces.
-
- Args:
- attribute_filters: The attribute filters to apply to the traces.
-
-          limit: The maximum number of traces to return.
-
-          offset: The offset into the traces to return.
-
-          order_by: The fields to order the returned traces by.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/telemetry/traces",
- body=maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "limit": limit,
- "offset": offset,
- "order_by": order_by,
- },
- telemetry_query_traces_params.TelemetryQueryTracesParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]),
- )
-
- def save_spans_to_dataset(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam],
- attributes_to_save: SequenceNotStr[str],
- dataset_id: str,
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Save spans to a dataset.
-
- Args:
- attribute_filters: The attribute filters to apply to the spans.
-
- attributes_to_save: The attributes to save to the dataset.
-
- dataset_id: The ID of the dataset to save the spans to.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1/telemetry/spans/export",
- body=maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "attributes_to_save": attributes_to_save,
- "dataset_id": dataset_id,
- "max_depth": max_depth,
- },
- telemetry_save_spans_to_dataset_params.TelemetrySaveSpansToDatasetParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
class AsyncTelemetryResource(AsyncAPIResource):
@cached_property
@@ -463,125 +106,6 @@ def with_streaming_response(self) -> AsyncTelemetryResourceWithStreamingResponse
"""
return AsyncTelemetryResourceWithStreamingResponse(self)
- async def get_span(
- self,
- span_id: str,
- *,
- trace_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryGetSpanResponse:
- """
- Get a span by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not trace_id:
- raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
- if not span_id:
- raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
- return await self._get(
- f"/v1/telemetry/traces/{trace_id}/spans/{span_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TelemetryGetSpanResponse,
- )
-
- async def get_span_tree(
- self,
- span_id: str,
- *,
- attributes_to_return: SequenceNotStr[str] | Omit = omit,
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryGetSpanTreeResponse:
- """
- Get a span tree by its ID.
-
- Args:
- attributes_to_return: The attributes to return in the tree.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not span_id:
- raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
- return await self._post(
- f"/v1/telemetry/spans/{span_id}/tree",
- body=await async_maybe_transform(
- {
- "attributes_to_return": attributes_to_return,
- "max_depth": max_depth,
- },
- telemetry_get_span_tree_params.TelemetryGetSpanTreeParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]),
- )
-
- async def get_trace(
- self,
- trace_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Trace:
- """
- Get a trace by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not trace_id:
- raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
- return await self._get(
- f"/v1/telemetry/traces/{trace_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Trace,
- )
-
async def log_event(
self,
*,
@@ -626,342 +150,38 @@ async def log_event(
cast_to=NoneType,
)
- async def query_metrics(
- self,
- metric_name: str,
- *,
- query_type: Literal["range", "instant"],
- start_time: int,
- end_time: int | Omit = omit,
- granularity: str | Omit = omit,
- label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQueryMetricsResponse:
- """
- Query metrics.
-
- Args:
- query_type: The type of query to perform.
-
- start_time: The start time of the metric to query.
-
- end_time: The end time of the metric to query.
-
- granularity: The granularity of the metric to query.
-
- label_matchers: The label matchers to apply to the metric.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not metric_name:
- raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
- return await self._post(
- f"/v1/telemetry/metrics/{metric_name}",
- body=await async_maybe_transform(
- {
- "query_type": query_type,
- "start_time": start_time,
- "end_time": end_time,
- "granularity": granularity,
- "label_matchers": label_matchers,
- },
- telemetry_query_metrics_params.TelemetryQueryMetricsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
- )
-
- async def query_spans(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam],
- attributes_to_return: SequenceNotStr[str],
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQuerySpansResponse:
- """
- Query spans.
-
- Args:
- attribute_filters: The attribute filters to apply to the spans.
-
- attributes_to_return: The attributes to return in the spans.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/telemetry/spans",
- body=await async_maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "attributes_to_return": attributes_to_return,
- "max_depth": max_depth,
- },
- telemetry_query_spans_params.TelemetryQuerySpansParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]),
- )
-
- async def query_traces(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam] | Omit = omit,
- limit: int | Omit = omit,
- offset: int | Omit = omit,
- order_by: SequenceNotStr[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> TelemetryQueryTracesResponse:
- """
- Query traces.
-
- Args:
- attribute_filters: The attribute filters to apply to the traces.
-
-          limit: The maximum number of traces to return.
-
-          offset: The offset into the traces to return.
-
-          order_by: The fields to order the returned traces by.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/telemetry/traces",
- body=await async_maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "limit": limit,
- "offset": offset,
- "order_by": order_by,
- },
- telemetry_query_traces_params.TelemetryQueryTracesParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper,
- ),
- cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]),
- )
-
- async def save_spans_to_dataset(
- self,
- *,
- attribute_filters: Iterable[QueryConditionParam],
- attributes_to_save: SequenceNotStr[str],
- dataset_id: str,
- max_depth: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Save spans to a dataset.
-
- Args:
- attribute_filters: The attribute filters to apply to the spans.
-
- attributes_to_save: The attributes to save to the dataset.
-
- dataset_id: The ID of the dataset to save the spans to.
-
- max_depth: The maximum depth of the tree.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1/telemetry/spans/export",
- body=await async_maybe_transform(
- {
- "attribute_filters": attribute_filters,
- "attributes_to_save": attributes_to_save,
- "dataset_id": dataset_id,
- "max_depth": max_depth,
- },
- telemetry_save_spans_to_dataset_params.TelemetrySaveSpansToDatasetParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
class TelemetryResourceWithRawResponse:
def __init__(self, telemetry: TelemetryResource) -> None:
self._telemetry = telemetry
- self.get_span = to_raw_response_wrapper(
- telemetry.get_span,
- )
- self.get_span_tree = to_raw_response_wrapper(
- telemetry.get_span_tree,
- )
- self.get_trace = to_raw_response_wrapper(
- telemetry.get_trace,
- )
self.log_event = to_raw_response_wrapper(
telemetry.log_event,
)
- self.query_metrics = to_raw_response_wrapper(
- telemetry.query_metrics,
- )
- self.query_spans = to_raw_response_wrapper(
- telemetry.query_spans,
- )
- self.query_traces = to_raw_response_wrapper(
- telemetry.query_traces,
- )
- self.save_spans_to_dataset = to_raw_response_wrapper(
- telemetry.save_spans_to_dataset,
- )
class AsyncTelemetryResourceWithRawResponse:
def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self._telemetry = telemetry
- self.get_span = async_to_raw_response_wrapper(
- telemetry.get_span,
- )
- self.get_span_tree = async_to_raw_response_wrapper(
- telemetry.get_span_tree,
- )
- self.get_trace = async_to_raw_response_wrapper(
- telemetry.get_trace,
- )
self.log_event = async_to_raw_response_wrapper(
telemetry.log_event,
)
- self.query_metrics = async_to_raw_response_wrapper(
- telemetry.query_metrics,
- )
- self.query_spans = async_to_raw_response_wrapper(
- telemetry.query_spans,
- )
- self.query_traces = async_to_raw_response_wrapper(
- telemetry.query_traces,
- )
- self.save_spans_to_dataset = async_to_raw_response_wrapper(
- telemetry.save_spans_to_dataset,
- )
class TelemetryResourceWithStreamingResponse:
def __init__(self, telemetry: TelemetryResource) -> None:
self._telemetry = telemetry
- self.get_span = to_streamed_response_wrapper(
- telemetry.get_span,
- )
- self.get_span_tree = to_streamed_response_wrapper(
- telemetry.get_span_tree,
- )
- self.get_trace = to_streamed_response_wrapper(
- telemetry.get_trace,
- )
self.log_event = to_streamed_response_wrapper(
telemetry.log_event,
)
- self.query_metrics = to_streamed_response_wrapper(
- telemetry.query_metrics,
- )
- self.query_spans = to_streamed_response_wrapper(
- telemetry.query_spans,
- )
- self.query_traces = to_streamed_response_wrapper(
- telemetry.query_traces,
- )
- self.save_spans_to_dataset = to_streamed_response_wrapper(
- telemetry.save_spans_to_dataset,
- )
class AsyncTelemetryResourceWithStreamingResponse:
def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self._telemetry = telemetry
- self.get_span = async_to_streamed_response_wrapper(
- telemetry.get_span,
- )
- self.get_span_tree = async_to_streamed_response_wrapper(
- telemetry.get_span_tree,
- )
- self.get_trace = async_to_streamed_response_wrapper(
- telemetry.get_trace,
- )
self.log_event = async_to_streamed_response_wrapper(
telemetry.log_event,
)
- self.query_metrics = async_to_streamed_response_wrapper(
- telemetry.query_metrics,
- )
- self.query_spans = async_to_streamed_response_wrapper(
- telemetry.query_spans,
- )
- self.query_traces = async_to_streamed_response_wrapper(
- telemetry.query_traces,
- )
- self.save_spans_to_dataset = async_to_streamed_response_wrapper(
- telemetry.save_spans_to_dataset,
- )
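For context, the deleted accessors above all follow one generated pattern: each
`*WithRawResponse` / `*WithStreamingResponse` class rebinds the surviving resource
methods through a wrapper so callers can opt into the raw HTTP response. A
self-contained toy sketch of that pattern (simplified stand-ins, not the SDK's
actual wrapper internals):

    from dataclasses import dataclass
    from typing import Any, Callable

    @dataclass
    class RawResponse:
        status_code: int  # toy value; the real wrapper carries the actual httpx response
        parsed: Any

    def to_raw_response_wrapper(method: Callable[..., Any]) -> Callable[..., RawResponse]:
        # Rebind a resource method so it returns a raw-response envelope
        # instead of only the parsed value.
        def wrapped(*args: Any, **kwargs: Any) -> RawResponse:
            return RawResponse(status_code=200, parsed=method(*args, **kwargs))
        return wrapped

    class TelemetryResource:
        def log_event(self, event: dict) -> None:
            print("logged", event)

    class TelemetryResourceWithRawResponse:
        def __init__(self, telemetry: TelemetryResource) -> None:
            # After this change, log_event is the only telemetry method left to wrap.
            self.log_event = to_raw_response_wrapper(telemetry.log_event)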
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 0c3d0f34..c7053fec 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -5,7 +5,6 @@
from .file import File as File
from .tool import Tool as Tool
from .model import Model as Model
-from .trace import Trace as Trace
from .shared import (
Message as Message,
Document as Document,
@@ -27,7 +26,6 @@
)
from .shield import Shield as Shield
from .tool_def import ToolDef as ToolDef
-from .benchmark import Benchmark as Benchmark
from .route_info import RouteInfo as RouteInfo
from .scoring_fn import ScoringFn as ScoringFn
from .tool_group import ToolGroup as ToolGroup
@@ -40,7 +38,6 @@
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
from .file_list_params import FileListParams as FileListParams
-from .span_with_status import SpanWithStatus as SpanWithStatus
from .tool_list_params import ToolListParams as ToolListParams
from .scoring_fn_params import ScoringFnParams as ScoringFnParams
from .file_create_params import FileCreateParams as FileCreateParams
@@ -53,17 +50,13 @@
from .delete_file_response import DeleteFileResponse as DeleteFileResponse
from .list_models_response import ListModelsResponse as ListModelsResponse
from .list_routes_response import ListRoutesResponse as ListRoutesResponse
-from .query_spans_response import QuerySpansResponse as QuerySpansResponse
from .response_list_params import ResponseListParams as ResponseListParams
from .scoring_score_params import ScoringScoreParams as ScoringScoreParams
from .shield_list_response import ShieldListResponse as ShieldListResponse
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
-from .dataset_list_response import DatasetListResponse as DatasetListResponse
from .list_shields_response import ListShieldsResponse as ListShieldsResponse
from .model_register_params import ModelRegisterParams as ModelRegisterParams
from .query_chunks_response import QueryChunksResponse as QueryChunksResponse
-from .query_condition_param import QueryConditionParam as QueryConditionParam
-from .list_datasets_response import ListDatasetsResponse as ListDatasetsResponse
from .provider_list_response import ProviderListResponse as ProviderListResponse
from .response_create_params import ResponseCreateParams as ResponseCreateParams
from .response_list_response import ResponseListResponse as ResponseListResponse
@@ -72,9 +65,6 @@
from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams
from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult
from .vector_io_query_params import VectorIoQueryParams as VectorIoQueryParams
-from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse
-from .dataset_iterrows_params import DatasetIterrowsParams as DatasetIterrowsParams
-from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .list_providers_response import ListProvidersResponse as ListProvidersResponse
from .scoring_fn_params_param import ScoringFnParamsParam as ScoringFnParamsParam
@@ -82,17 +72,12 @@
from .vector_db_list_response import VectorDBListResponse as VectorDBListResponse
from .vector_io_insert_params import VectorIoInsertParams as VectorIoInsertParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
-from .list_benchmarks_response import ListBenchmarksResponse as ListBenchmarksResponse
from .list_vector_dbs_response import ListVectorDBsResponse as ListVectorDBsResponse
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
+from .response_create_response import ResponseCreateResponse as ResponseCreateResponse
from .response_delete_response import ResponseDeleteResponse as ResponseDeleteResponse
from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .benchmark_register_params import BenchmarkRegisterParams as BenchmarkRegisterParams
-from .dataset_appendrows_params import DatasetAppendrowsParams as DatasetAppendrowsParams
-from .dataset_iterrows_response import DatasetIterrowsResponse as DatasetIterrowsResponse
-from .dataset_register_response import DatasetRegisterResponse as DatasetRegisterResponse
-from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse
from .list_tool_groups_response import ListToolGroupsResponse as ListToolGroupsResponse
from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams
from .vector_db_register_params import VectorDBRegisterParams as VectorDBRegisterParams
@@ -104,30 +89,18 @@
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .list_vector_stores_response import ListVectorStoresResponse as ListVectorStoresResponse
-from .telemetry_get_span_response import TelemetryGetSpanResponse as TelemetryGetSpanResponse
from .vector_db_register_response import VectorDBRegisterResponse as VectorDBRegisterResponse
from .vector_db_retrieve_response import VectorDBRetrieveResponse as VectorDBRetrieveResponse
from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse
-from .telemetry_query_spans_params import TelemetryQuerySpansParams as TelemetryQuerySpansParams
from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
-from .telemetry_query_traces_params import TelemetryQueryTracesParams as TelemetryQueryTracesParams
from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse
-from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams
-from .telemetry_query_metrics_params import TelemetryQueryMetricsParams as TelemetryQueryMetricsParams
-from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse
from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams
from .list_scoring_functions_response import ListScoringFunctionsResponse as ListScoringFunctionsResponse
-from .telemetry_query_traces_response import TelemetryQueryTracesResponse as TelemetryQueryTracesResponse
from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams
from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
-from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse
-from .telemetry_query_metrics_response import TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse
from .tool_runtime_list_tools_response import ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse
from .synthetic_data_generation_response import SyntheticDataGenerationResponse as SyntheticDataGenerationResponse
-from .telemetry_save_spans_to_dataset_params import (
- TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
-)
from .synthetic_data_generation_generate_params import (
SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams,
)
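Two notes on this hunk. The redundant `import X as X` aliases are deliberate:
under PEP 484's explicit re-export rules (e.g. mypy's --no-implicit-reexport),
the alias marks each name as a public export. And because the benchmark, dataset,
and telemetry types are dropped outright, downstream imports of those names now
raise at import time; a hedged compatibility guard (only if a fallback makes
sense for your code) could look like:

    # Sketch of a migration guard for code that imported the removed types.
    try:
        from llama_stack_client.types import Benchmark  # removed by this change
    except ImportError:
        Benchmark = None  # benchmark APIs no longer ship with this SDK surface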
diff --git a/src/llama_stack_client/types/benchmark.py b/src/llama_stack_client/types/benchmark.py
deleted file mode 100644
index eb6dde75..00000000
--- a/src/llama_stack_client/types/benchmark.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Benchmark"]
-
-
-class Benchmark(BaseModel):
- dataset_id: str
- """Identifier of the dataset to use for the benchmark evaluation"""
-
- identifier: str
-
- metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
- """Metadata for this evaluation task"""
-
- provider_id: str
-
- scoring_functions: List[str]
- """List of scoring function identifiers to apply during evaluation"""
-
- type: Literal["benchmark"]
- """The resource type, always benchmark"""
-
- provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/benchmark_list_response.py b/src/llama_stack_client/types/benchmark_list_response.py
deleted file mode 100644
index b2e8ad2b..00000000
--- a/src/llama_stack_client/types/benchmark_list_response.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .benchmark import Benchmark
-
-__all__ = ["BenchmarkListResponse"]
-
-BenchmarkListResponse: TypeAlias = List[Benchmark]
diff --git a/src/llama_stack_client/types/benchmark_register_params.py b/src/llama_stack_client/types/benchmark_register_params.py
deleted file mode 100644
index 322e2da8..00000000
--- a/src/llama_stack_client/types/benchmark_register_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Required, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = ["BenchmarkRegisterParams"]
-
-
-class BenchmarkRegisterParams(TypedDict, total=False):
- benchmark_id: Required[str]
- """The ID of the benchmark to register."""
-
- dataset_id: Required[str]
- """The ID of the dataset to use for the benchmark."""
-
- scoring_functions: Required[SequenceNotStr[str]]
- """The scoring functions to use for the benchmark."""
-
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """The metadata to use for the benchmark."""
-
- provider_benchmark_id: str
- """The ID of the provider benchmark to use for the benchmark."""
-
- provider_id: str
- """The ID of the provider to use for the benchmark."""
diff --git a/src/llama_stack_client/types/dataset_appendrows_params.py b/src/llama_stack_client/types/dataset_appendrows_params.py
deleted file mode 100644
index 2e96e124..00000000
--- a/src/llama_stack_client/types/dataset_appendrows_params.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Required, TypedDict
-
-__all__ = ["DatasetAppendrowsParams"]
-
-
-class DatasetAppendrowsParams(TypedDict, total=False):
- rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]]
- """The rows to append to the dataset."""
diff --git a/src/llama_stack_client/types/dataset_iterrows_params.py b/src/llama_stack_client/types/dataset_iterrows_params.py
deleted file mode 100644
index 99065312..00000000
--- a/src/llama_stack_client/types/dataset_iterrows_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["DatasetIterrowsParams"]
-
-
-class DatasetIterrowsParams(TypedDict, total=False):
- limit: int
- """The number of rows to get."""
-
- start_index: int
- """Index into dataset for the first row to get. Get all rows if None."""
diff --git a/src/llama_stack_client/types/dataset_iterrows_response.py b/src/llama_stack_client/types/dataset_iterrows_response.py
deleted file mode 100644
index 8681b018..00000000
--- a/src/llama_stack_client/types/dataset_iterrows_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-
-from .._models import BaseModel
-
-__all__ = ["DatasetIterrowsResponse"]
-
-
-class DatasetIterrowsResponse(BaseModel):
- data: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
- """The list of items for the current page"""
-
- has_more: bool
- """Whether there are more items available after this set"""
-
- url: Optional[str] = None
- """The URL for accessing this list"""
diff --git a/src/llama_stack_client/types/dataset_list_response.py b/src/llama_stack_client/types/dataset_list_response.py
deleted file mode 100644
index 7080e589..00000000
--- a/src/llama_stack_client/types/dataset_list_response.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = [
- "DatasetListResponse",
- "DatasetListResponseItem",
- "DatasetListResponseItemSource",
- "DatasetListResponseItemSourceUriDataSource",
- "DatasetListResponseItemSourceRowsDataSource",
-]
-
-
-class DatasetListResponseItemSourceUriDataSource(BaseModel):
- type: Literal["uri"]
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- "data:csv;base64,{base64_content}"
- """
-
-
-class DatasetListResponseItemSourceRowsDataSource(BaseModel):
- rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
- """The dataset is stored in rows.
-
- E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]} ]
- """
-
- type: Literal["rows"]
-
-
-DatasetListResponseItemSource: TypeAlias = Annotated[
- Union[DatasetListResponseItemSourceUriDataSource, DatasetListResponseItemSourceRowsDataSource],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DatasetListResponseItem(BaseModel):
- identifier: str
-
- metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
- """Additional metadata for the dataset"""
-
- provider_id: str
-
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: DatasetListResponseItemSource
- """Data source configuration for the dataset"""
-
- type: Literal["dataset"]
- """Type of resource, always 'dataset' for datasets"""
-
- provider_resource_id: Optional[str] = None
-
-
-DatasetListResponse: TypeAlias = List[DatasetListResponseItem]
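The `Annotated[Union[...], PropertyInfo(discriminator="type")]` alias asks the
model layer to dispatch on the literal `type` field rather than trying each
variant in order. The same idea in plain pydantic, as a standalone sketch
(`PropertyInfo` is the SDK's internal analogue of `Field(discriminator=...)`):

    from typing import List, Union
    from typing_extensions import Annotated, Literal

    from pydantic import BaseModel, Field, TypeAdapter

    class UriDataSource(BaseModel):
        type: Literal["uri"]
        uri: str

    class RowsDataSource(BaseModel):
        type: Literal["rows"]
        rows: List[dict]

    Source = Annotated[Union[UriDataSource, RowsDataSource], Field(discriminator="type")]

    # The "type" value alone decides which model validates the payload.
    src = TypeAdapter(Source).validate_python({"type": "uri", "uri": "lsfs://mydata.jsonl"})
    assert isinstance(src, UriDataSource)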
diff --git a/src/llama_stack_client/types/dataset_register_params.py b/src/llama_stack_client/types/dataset_register_params.py
deleted file mode 100644
index 6fd5db3f..00000000
--- a/src/llama_stack_client/types/dataset_register_params.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["DatasetRegisterParams", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class DatasetRegisterParams(TypedDict, total=False):
- purpose: Required[Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]]
- """The purpose of the dataset.
-
- One of: - "post-training/messages": The dataset contains a messages column with
- list of messages for post-training. { "messages": [ {"role": "user", "content":
- "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] } -
- "eval/question-answer": The dataset contains a question column and an answer
- column for evaluation. { "question": "What is the capital of France?", "answer":
- "Paris" } - "eval/messages-answer": The dataset contains a messages column with
- list of messages and an answer column for evaluation. { "messages": [ {"role":
- "user", "content": "Hello, my name is John Doe."}, {"role": "assistant",
- "content": "Hello, John Doe. How can I help you today?"}, {"role": "user",
- "content": "What's my name?"}, ], "answer": "John Doe" }
- """
-
- source: Required[Source]
- """The data source of the dataset.
-
- Ensure that the data source schema is compatible with the purpose of the
- dataset. Examples: - { "type": "uri", "uri":
- "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri":
- "lsfs://mydata.jsonl" } - { "type": "uri", "uri":
- "data:csv;base64,{base64_content}" } - { "type": "uri", "uri":
- "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [
- { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}, ] } ] }
- """
-
- dataset_id: str
- """The ID of the dataset. If not provided, an ID will be generated."""
-
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """The metadata for the dataset. - E.g. {"description": "My dataset"}."""
-
-
-class SourceUriDataSource(TypedDict, total=False):
- type: Required[Literal["uri"]]
-
- uri: Required[str]
- """The dataset can be obtained from a URI.
-
- E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- "data:csv;base64,{base64_content}"
- """
-
-
-class SourceRowsDataSource(TypedDict, total=False):
- rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]]
- """The dataset is stored in rows.
-
- E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]} ]
- """
-
- type: Required[Literal["rows"]]
-
-
-Source: TypeAlias = Union[SourceUriDataSource, SourceRowsDataSource]
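The two `Source` variants map directly onto the docstring examples above, so the
same registration call accepted either shape:

    uri_source = {"type": "uri", "uri": "https://mywebsite.com/mydata.jsonl"}

    rows_source = {
        "type": "rows",
        "rows": [
            {"messages": [
                {"role": "user", "content": "Hello, world!"},
                {"role": "assistant", "content": "Hello, world!"},
            ]}
        ],
    }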
diff --git a/src/llama_stack_client/types/dataset_register_response.py b/src/llama_stack_client/types/dataset_register_response.py
deleted file mode 100644
index 8da590b8..00000000
--- a/src/llama_stack_client/types/dataset_register_response.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = ["DatasetRegisterResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class SourceUriDataSource(BaseModel):
- type: Literal["uri"]
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- "data:csv;base64,{base64_content}"
- """
-
-
-class SourceRowsDataSource(BaseModel):
- rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
- """The dataset is stored in rows.
-
- E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]} ]
- """
-
- type: Literal["rows"]
-
-
-Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
-
-
-class DatasetRegisterResponse(BaseModel):
- identifier: str
-
- metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
- """Additional metadata for the dataset"""
-
- provider_id: str
-
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: Source
- """Data source configuration for the dataset"""
-
- type: Literal["dataset"]
- """Type of resource, always 'dataset' for datasets"""
-
- provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/dataset_retrieve_response.py b/src/llama_stack_client/types/dataset_retrieve_response.py
deleted file mode 100644
index 6cda0a42..00000000
--- a/src/llama_stack_client/types/dataset_retrieve_response.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = ["DatasetRetrieveResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class SourceUriDataSource(BaseModel):
- type: Literal["uri"]
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
- "data:csv;base64,{base64_content}"
- """
-
-
-class SourceRowsDataSource(BaseModel):
- rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
- """The dataset is stored in rows.
-
- E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]} ]
- """
-
- type: Literal["rows"]
-
-
-Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
-
-
-class DatasetRetrieveResponse(BaseModel):
- identifier: str
-
- metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
- """Additional metadata for the dataset"""
-
- provider_id: str
-
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: Source
- """Data source configuration for the dataset"""
-
- type: Literal["dataset"]
- """Type of resource, always 'dataset' for datasets"""
-
- provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/list_benchmarks_response.py b/src/llama_stack_client/types/list_benchmarks_response.py
deleted file mode 100644
index f265f130..00000000
--- a/src/llama_stack_client/types/list_benchmarks_response.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .benchmark_list_response import BenchmarkListResponse
-
-__all__ = ["ListBenchmarksResponse"]
-
-
-class ListBenchmarksResponse(BaseModel):
- data: BenchmarkListResponse
diff --git a/src/llama_stack_client/types/list_datasets_response.py b/src/llama_stack_client/types/list_datasets_response.py
deleted file mode 100644
index 21c4b72a..00000000
--- a/src/llama_stack_client/types/list_datasets_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .dataset_list_response import DatasetListResponse
-
-__all__ = ["ListDatasetsResponse"]
-
-
-class ListDatasetsResponse(BaseModel):
- data: DatasetListResponse
- """List of datasets"""
diff --git a/src/llama_stack_client/types/query_condition_param.py b/src/llama_stack_client/types/query_condition_param.py
deleted file mode 100644
index 59def1b4..00000000
--- a/src/llama_stack_client/types/query_condition_param.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["QueryConditionParam"]
-
-
-class QueryConditionParam(TypedDict, total=False):
- key: Required[str]
- """The attribute key to filter on"""
-
- op: Required[Literal["eq", "ne", "gt", "lt"]]
- """The comparison operator to apply"""
-
- value: Required[Union[bool, float, str, Iterable[object], object, None]]
- """The value to compare against"""
diff --git a/src/llama_stack_client/types/query_spans_response.py b/src/llama_stack_client/types/query_spans_response.py
deleted file mode 100644
index a20c9b92..00000000
--- a/src/llama_stack_client/types/query_spans_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .telemetry_query_spans_response import TelemetryQuerySpansResponse
-
-__all__ = ["QuerySpansResponse"]
-
-
-class QuerySpansResponse(BaseModel):
- data: TelemetryQuerySpansResponse
- """List of spans matching the query criteria"""
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index daf7f6cf..c5021596 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -2,450 +2,32 @@
from __future__ import annotations
-from typing import Dict, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Union
+from typing_extensions import Literal, TypedDict
-from .._types import SequenceNotStr
-
-__all__ = [
- "ResponseCreateParamsBase",
- "InputUnionMember1",
- "InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall",
- "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall",
- "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult",
- "InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall",
- "InputUnionMember1OpenAIResponseInputFunctionToolCallOutput",
- "InputUnionMember1OpenAIResponseMcpApprovalRequest",
- "InputUnionMember1OpenAIResponseMcpApprovalResponse",
- "InputUnionMember1OpenAIResponseMessage",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember1",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
- "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
- "Text",
- "TextFormat",
- "Tool",
- "ToolOpenAIResponseInputToolWebSearch",
- "ToolOpenAIResponseInputToolFileSearch",
- "ToolOpenAIResponseInputToolFileSearchRankingOptions",
- "ToolOpenAIResponseInputToolFunction",
- "ToolOpenAIResponseInputToolMcp",
- "ToolOpenAIResponseInputToolMcpRequireApproval",
- "ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter",
- "ToolOpenAIResponseInputToolMcpAllowedTools",
- "ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter",
- "ResponseCreateParamsNonStreaming",
- "ResponseCreateParamsStreaming",
-]
+__all__ = ["ResponseCreateParamsBase", "ResponseCreateParamsNonStreaming"]
class ResponseCreateParamsBase(TypedDict, total=False):
- input: Required[Union[str, Iterable[InputUnionMember1]]]
- """Input message(s) to create the response."""
-
- model: Required[str]
- """The underlying LLM used for completions."""
-
- include: SequenceNotStr[str]
- """(Optional) Additional fields to include in the response."""
-
- instructions: str
-
- max_infer_iters: int
-
- previous_response_id: str
- """
- (Optional) if specified, the new response will be a continuation of the previous
- response. This can be used to easily fork-off new responses from existing
- responses.
- """
-
- store: bool
-
- temperature: float
-
- text: Text
- """Text response configuration for OpenAI responses."""
-
- tools: Iterable[Tool]
-
-
-class InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False):
- id: Required[str]
- """Unique identifier for this tool call"""
-
- status: Required[str]
- """Current status of the web search operation"""
-
- type: Required[Literal["web_search_call"]]
- """Tool call type identifier, always "web_search_call" """
-
-
-class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False):
- attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
- """(Optional) Key-value attributes associated with the file"""
-
- file_id: Required[str]
- """Unique identifier of the file containing the result"""
-
- filename: Required[str]
- """Name of the file containing the result"""
-
- score: Required[float]
- """Relevance score for this search result (between 0 and 1)"""
-
- text: Required[str]
- """Text content of the search result"""
-
-
-class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False):
- id: Required[str]
- """Unique identifier for this tool call"""
-
- queries: Required[SequenceNotStr[str]]
- """List of search queries executed"""
-
- status: Required[str]
- """Current status of the file search operation"""
-
- type: Required[Literal["file_search_call"]]
- """Tool call type identifier, always "file_search_call" """
-
- results: Iterable[InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult]
- """(Optional) Search results returned by the file search operation"""
-
-
-class InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False):
- arguments: Required[str]
- """JSON string containing the function arguments"""
-
- call_id: Required[str]
- """Unique identifier for the function call"""
-
- name: Required[str]
- """Name of the function being called"""
-
- type: Required[Literal["function_call"]]
- """Tool call type identifier, always "function_call" """
-
- id: str
- """(Optional) Additional identifier for the tool call"""
-
- status: str
- """(Optional) Current status of the function call execution"""
-
-
-class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, total=False):
- call_id: Required[str]
-
- output: Required[str]
-
- type: Required[Literal["function_call_output"]]
-
- id: str
-
- status: str
-
-
-class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False):
- id: Required[str]
-
- arguments: Required[str]
-
- name: Required[str]
-
- server_label: Required[str]
-
- type: Required[Literal["mcp_approval_request"]]
-
-
-class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False):
- approval_request_id: Required[str]
-
- approve: Required[bool]
-
- type: Required[Literal["mcp_approval_response"]]
-
- id: str
-
- reason: str
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(
- TypedDict, total=False
-):
- text: Required[str]
- """The text content of the input message"""
-
- type: Required[Literal["input_text"]]
- """Content type identifier, always "input_text" """
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(
- TypedDict, total=False
-):
- detail: Required[Literal["low", "high", "auto"]]
- """Level of detail for image processing, can be "low", "high", or "auto" """
-
- type: Required[Literal["input_image"]]
- """Content type identifier, always "input_image" """
-
- image_url: str
- """(Optional) URL of the image content"""
-
-
-InputUnionMember1OpenAIResponseMessageContentUnionMember1: TypeAlias = Union[
- InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
- InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
-]
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(
- TypedDict, total=False
-):
- file_id: Required[str]
- """Unique identifier of the referenced file"""
-
- filename: Required[str]
- """Name of the referenced file"""
-
- index: Required[int]
- """Position index of the citation within the content"""
-
- type: Required[Literal["file_citation"]]
- """Annotation type identifier, always "file_citation" """
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(
- TypedDict, total=False
-):
- end_index: Required[int]
- """End position of the citation span in the content"""
-
- start_index: Required[int]
- """Start position of the citation span in the content"""
-
- title: Required[str]
- """Title of the referenced web resource"""
-
- type: Required[Literal["url_citation"]]
- """Annotation type identifier, always "url_citation" """
+ after: str
+ """The ID of the last response to return."""
- url: Required[str]
- """URL of the referenced web resource"""
+ limit: int
+ """The number of responses to return."""
+ model: str
+ """The model to filter responses by."""
-class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
- TypedDict, total=False
-):
- container_id: Required[str]
-
- end_index: Required[int]
-
- file_id: Required[str]
-
- filename: Required[str]
-
- start_index: Required[int]
-
- type: Required[Literal["container_file_citation"]]
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(
- TypedDict, total=False
-):
- file_id: Required[str]
-
- index: Required[int]
-
- type: Required[Literal["file_path"]]
-
-
-InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Union[
- InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
- InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
- InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
- InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
-]
-
-
-class InputUnionMember1OpenAIResponseMessageContentUnionMember2(TypedDict, total=False):
- annotations: Required[Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation]]
-
- text: Required[str]
-
- type: Required[Literal["output_text"]]
-
-
-class InputUnionMember1OpenAIResponseMessage(TypedDict, total=False):
- content: Required[
- Union[
- str,
- Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember1],
- Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember2],
- ]
- ]
-
- role: Required[Literal["system", "developer", "user", "assistant"]]
-
- type: Required[Literal["message"]]
-
- id: str
-
- status: str
-
-
-InputUnionMember1: TypeAlias = Union[
- InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall,
- InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall,
- InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall,
- InputUnionMember1OpenAIResponseInputFunctionToolCallOutput,
- InputUnionMember1OpenAIResponseMcpApprovalRequest,
- InputUnionMember1OpenAIResponseMcpApprovalResponse,
- InputUnionMember1OpenAIResponseMessage,
-]
-
-
-class TextFormat(TypedDict, total=False):
- type: Required[Literal["text", "json_schema", "json_object"]]
- """Must be "text", "json_schema", or "json_object" to identify the format type"""
-
- description: str
- """(Optional) A description of the response format. Only used for json_schema."""
-
- name: str
- """The name of the response format. Only used for json_schema."""
-
- schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """The JSON schema the response should conform to.
-
- In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
- """
-
- strict: bool
- """(Optional) Whether to strictly enforce the JSON schema.
-
- If true, the response must match the schema exactly. Only used for json_schema.
- """
-
-
-class Text(TypedDict, total=False):
- format: TextFormat
- """(Optional) Text format configuration specifying output format requirements"""
-
-
-class ToolOpenAIResponseInputToolWebSearch(TypedDict, total=False):
- type: Required[Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"]]
- """Web search tool type variant to use"""
-
- search_context_size: str
- """(Optional) Size of search context, must be "low", "medium", or "high" """
-
-
-class ToolOpenAIResponseInputToolFileSearchRankingOptions(TypedDict, total=False):
- ranker: str
- """(Optional) Name of the ranking algorithm to use"""
-
- score_threshold: float
- """(Optional) Minimum relevance score threshold for results"""
-
-
-class ToolOpenAIResponseInputToolFileSearch(TypedDict, total=False):
- type: Required[Literal["file_search"]]
- """Tool type identifier, always "file_search" """
-
- vector_store_ids: Required[SequenceNotStr[str]]
- """List of vector store identifiers to search within"""
-
- filters: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) Additional filters to apply to the search"""
-
- max_num_results: int
- """(Optional) Maximum number of search results to return (1-50)"""
-
- ranking_options: ToolOpenAIResponseInputToolFileSearchRankingOptions
- """(Optional) Options for ranking and scoring search results"""
-
-
-class ToolOpenAIResponseInputToolFunction(TypedDict, total=False):
- name: Required[str]
- """Name of the function that can be called"""
-
- type: Required[Literal["function"]]
- """Tool type identifier, always "function" """
-
- description: str
- """(Optional) Description of what the function does"""
-
- parameters: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) JSON schema defining the function's parameters"""
-
- strict: bool
- """(Optional) Whether to enforce strict parameter validation"""
-
-
-class ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter(TypedDict, total=False):
- always: SequenceNotStr[str]
- """(Optional) List of tool names that always require approval"""
-
- never: SequenceNotStr[str]
- """(Optional) List of tool names that never require approval"""
-
-
-ToolOpenAIResponseInputToolMcpRequireApproval: TypeAlias = Union[
- Literal["always", "never"], ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter
-]
-
-
-class ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter(TypedDict, total=False):
- tool_names: SequenceNotStr[str]
- """(Optional) List of specific tool names that are allowed"""
-
-
-ToolOpenAIResponseInputToolMcpAllowedTools: TypeAlias = Union[
- SequenceNotStr[str], ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter
-]
-
-
-class ToolOpenAIResponseInputToolMcp(TypedDict, total=False):
- require_approval: Required[ToolOpenAIResponseInputToolMcpRequireApproval]
- """Approval requirement for tool calls ("always", "never", or filter)"""
-
- server_label: Required[str]
- """Label to identify this MCP server"""
-
- server_url: Required[str]
- """URL endpoint of the MCP server"""
-
- type: Required[Literal["mcp"]]
- """Tool type identifier, always "mcp" """
-
- allowed_tools: ToolOpenAIResponseInputToolMcpAllowedTools
- """(Optional) Restriction on which tools can be used from this server"""
-
- headers: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) HTTP headers to include when connecting to the server"""
-
-
-Tool: TypeAlias = Union[
- ToolOpenAIResponseInputToolWebSearch,
- ToolOpenAIResponseInputToolFileSearch,
- ToolOpenAIResponseInputToolFunction,
- ToolOpenAIResponseInputToolMcp,
-]
+ order: Literal["asc", "desc"]
+ """The order to sort responses by when sorted by created_at ('asc' or 'desc')."""
class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
- stream: Literal[False]
+ pass
class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
-    stream: Required[Literal[True]]
+    pass
ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
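With the `stream` discriminator gone, both variants are now empty subclasses and
the union survives only to keep call signatures stable. Under the new shape, a
params dict reduces to the four optional filters (values illustrative):

    params = {
        "after": "resp_123",      # hypothetical response ID cursor
        "limit": 20,
        "model": "llama3.2:3b",   # hypothetical model ID
        "order": "desc",
    }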
diff --git a/src/llama_stack_client/types/response_create_response.py b/src/llama_stack_client/types/response_create_response.py
new file mode 100644
index 00000000..bc031fcc
--- /dev/null
+++ b/src/llama_stack_client/types/response_create_response.py
@@ -0,0 +1,640 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from pydantic import Field as FieldInfo
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+ "ResponseCreateResponse",
+ "Data",
+ "DataInput",
+ "DataInputOpenAIResponseOutputMessageWebSearchToolCall",
+ "DataInputOpenAIResponseOutputMessageFileSearchToolCall",
+ "DataInputOpenAIResponseOutputMessageFileSearchToolCallResult",
+ "DataInputOpenAIResponseOutputMessageFunctionToolCall",
+ "DataInputOpenAIResponseInputFunctionToolCallOutput",
+ "DataInputOpenAIResponseMcpApprovalRequest",
+ "DataInputOpenAIResponseMcpApprovalResponse",
+ "DataInputOpenAIResponseMessage",
+ "DataInputOpenAIResponseMessageContentUnionMember1",
+ "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+ "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+ "DataInputOpenAIResponseMessageContentUnionMember2",
+ "DataInputOpenAIResponseMessageContentUnionMember2Annotation",
+ "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+ "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+ "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+ "DataOutput",
+ "DataOutputOpenAIResponseMessage",
+ "DataOutputOpenAIResponseMessageContentUnionMember1",
+ "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+ "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+ "DataOutputOpenAIResponseMessageContentUnionMember2",
+ "DataOutputOpenAIResponseMessageContentUnionMember2Annotation",
+ "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+ "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+ "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+ "DataOutputOpenAIResponseOutputMessageWebSearchToolCall",
+ "DataOutputOpenAIResponseOutputMessageFileSearchToolCall",
+ "DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult",
+ "DataOutputOpenAIResponseOutputMessageFunctionToolCall",
+ "DataOutputOpenAIResponseOutputMessageMcpCall",
+ "DataOutputOpenAIResponseOutputMessageMcpListTools",
+ "DataOutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "DataOutputOpenAIResponseMcpApprovalRequest",
+ "DataText",
+ "DataTextFormat",
+ "DataError",
+]
+
+
+class DataInputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+ id: str
+ """Unique identifier for this tool call"""
+
+ status: str
+ """Current status of the web search operation"""
+
+ type: Literal["web_search_call"]
+ """Tool call type identifier, always "web_search_call" """
+
+
+class DataInputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+ attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """(Optional) Key-value attributes associated with the file"""
+
+ file_id: str
+ """Unique identifier of the file containing the result"""
+
+ filename: str
+ """Name of the file containing the result"""
+
+ score: float
+ """Relevance score for this search result (between 0 and 1)"""
+
+ text: str
+ """Text content of the search result"""
+
+
+class DataInputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+ id: str
+ """Unique identifier for this tool call"""
+
+ queries: List[str]
+ """List of search queries executed"""
+
+ status: str
+ """Current status of the file search operation"""
+
+ type: Literal["file_search_call"]
+ """Tool call type identifier, always "file_search_call" """
+
+ results: Optional[List[DataInputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+ """(Optional) Search results returned by the file search operation"""
+
+
+class DataInputOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+ arguments: str
+ """JSON string containing the function arguments"""
+
+ call_id: str
+ """Unique identifier for the function call"""
+
+ name: str
+ """Name of the function being called"""
+
+ type: Literal["function_call"]
+ """Tool call type identifier, always "function_call" """
+
+ id: Optional[str] = None
+ """(Optional) Additional identifier for the tool call"""
+
+ status: Optional[str] = None
+ """(Optional) Current status of the function call execution"""
+
+
+class DataInputOpenAIResponseInputFunctionToolCallOutput(BaseModel):
+ call_id: str
+
+ output: str
+
+ type: Literal["function_call_output"]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+
+class DataInputOpenAIResponseMcpApprovalRequest(BaseModel):
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Literal["mcp_approval_request"]
+
+
+class DataInputOpenAIResponseMcpApprovalResponse(BaseModel):
+ approval_request_id: str
+
+ approve: bool
+
+ type: Literal["mcp_approval_response"]
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+
+class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
+ text: str
+ """The text content of the input message"""
+
+ type: Literal["input_text"]
+ """Content type identifier, always "input_text" """
+
+
+class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
+ detail: Literal["low", "high", "auto"]
+ """Level of detail for image processing, can be "low", "high", or "auto" """
+
+ type: Literal["input_image"]
+ """Content type identifier, always "input_image" """
+
+ image_url: Optional[str] = None
+ """(Optional) URL of the image content"""
+
+
+DataInputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
+ Union[
+ DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+ DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
+ file_id: str
+ """Unique identifier of the referenced file"""
+
+ filename: str
+ """Name of the referenced file"""
+
+ index: int
+ """Position index of the citation within the content"""
+
+ type: Literal["file_citation"]
+ """Annotation type identifier, always "file_citation" """
+
+
+class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
+ end_index: int
+ """End position of the citation span in the content"""
+
+ start_index: int
+ """Start position of the citation span in the content"""
+
+ title: str
+ """Title of the referenced web resource"""
+
+ type: Literal["url_citation"]
+ """Annotation type identifier, always "url_citation" """
+
+ url: str
+ """URL of the referenced web resource"""
+
+
+class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
+ BaseModel
+):
+ container_id: str
+
+ end_index: int
+
+ file_id: str
+
+ filename: str
+
+ start_index: int
+
+ type: Literal["container_file_citation"]
+
+
+class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+ file_id: str
+
+ index: int
+
+ type: Literal["file_path"]
+
+
+DataInputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
+ Union[
+ DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+ DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+ DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+ DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataInputOpenAIResponseMessageContentUnionMember2(BaseModel):
+ annotations: List[DataInputOpenAIResponseMessageContentUnionMember2Annotation]
+
+ text: str
+
+ type: Literal["output_text"]
+
+
+class DataInputOpenAIResponseMessage(BaseModel):
+ content: Union[
+ str,
+ List[DataInputOpenAIResponseMessageContentUnionMember1],
+ List[DataInputOpenAIResponseMessageContentUnionMember2],
+ ]
+
+ role: Literal["system", "developer", "user", "assistant"]
+
+ type: Literal["message"]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+
+DataInput: TypeAlias = Union[
+ DataInputOpenAIResponseOutputMessageWebSearchToolCall,
+ DataInputOpenAIResponseOutputMessageFileSearchToolCall,
+ DataInputOpenAIResponseOutputMessageFunctionToolCall,
+ DataInputOpenAIResponseInputFunctionToolCallOutput,
+ DataInputOpenAIResponseMcpApprovalRequest,
+ DataInputOpenAIResponseMcpApprovalResponse,
+ DataInputOpenAIResponseMessage,
+]
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
+ text: str
+ """The text content of the input message"""
+
+ type: Literal["input_text"]
+ """Content type identifier, always "input_text" """
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
+ detail: Literal["low", "high", "auto"]
+ """Level of detail for image processing, can be "low", "high", or "auto" """
+
+ type: Literal["input_image"]
+ """Content type identifier, always "input_image" """
+
+ image_url: Optional[str] = None
+ """(Optional) URL of the image content"""
+
+
+DataOutputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
+ Union[
+ DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+ DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
+ file_id: str
+ """Unique identifier of the referenced file"""
+
+ filename: str
+ """Name of the referenced file"""
+
+ index: int
+ """Position index of the citation within the content"""
+
+ type: Literal["file_citation"]
+ """Annotation type identifier, always "file_citation" """
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
+ end_index: int
+ """End position of the citation span in the content"""
+
+ start_index: int
+ """Start position of the citation span in the content"""
+
+ title: str
+ """Title of the referenced web resource"""
+
+ type: Literal["url_citation"]
+ """Annotation type identifier, always "url_citation" """
+
+ url: str
+ """URL of the referenced web resource"""
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
+ BaseModel
+):
+ container_id: str
+
+ end_index: int
+
+ file_id: str
+
+ filename: str
+
+ start_index: int
+
+ type: Literal["container_file_citation"]
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+ file_id: str
+
+ index: int
+
+ type: Literal["file_path"]
+
+
+DataOutputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
+ Union[
+ DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+ DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+ DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+ DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataOutputOpenAIResponseMessageContentUnionMember2(BaseModel):
+ annotations: List[DataOutputOpenAIResponseMessageContentUnionMember2Annotation]
+
+ text: str
+
+ type: Literal["output_text"]
+
+
+class DataOutputOpenAIResponseMessage(BaseModel):
+ content: Union[
+ str,
+ List[DataOutputOpenAIResponseMessageContentUnionMember1],
+ List[DataOutputOpenAIResponseMessageContentUnionMember2],
+ ]
+
+ role: Literal["system", "developer", "user", "assistant"]
+
+ type: Literal["message"]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+
+class DataOutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+ id: str
+ """Unique identifier for this tool call"""
+
+ status: str
+ """Current status of the web search operation"""
+
+ type: Literal["web_search_call"]
+ """Tool call type identifier, always "web_search_call" """
+
+
+class DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+ attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """(Optional) Key-value attributes associated with the file"""
+
+ file_id: str
+ """Unique identifier of the file containing the result"""
+
+ filename: str
+ """Name of the file containing the result"""
+
+ score: float
+ """Relevance score for this search result (between 0 and 1)"""
+
+ text: str
+ """Text content of the search result"""
+
+
+class DataOutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+ id: str
+ """Unique identifier for this tool call"""
+
+ queries: List[str]
+ """List of search queries executed"""
+
+ status: str
+ """Current status of the file search operation"""
+
+ type: Literal["file_search_call"]
+ """Tool call type identifier, always "file_search_call" """
+
+ results: Optional[List[DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+ """(Optional) Search results returned by the file search operation"""
+
+
+class DataOutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+ arguments: str
+ """JSON string containing the function arguments"""
+
+ call_id: str
+ """Unique identifier for the function call"""
+
+ name: str
+ """Name of the function being called"""
+
+ type: Literal["function_call"]
+ """Tool call type identifier, always "function_call" """
+
+ id: Optional[str] = None
+ """(Optional) Additional identifier for the tool call"""
+
+ status: Optional[str] = None
+ """(Optional) Current status of the function call execution"""
+
+
+class DataOutputOpenAIResponseOutputMessageMcpCall(BaseModel):
+ id: str
+ """Unique identifier for this MCP call"""
+
+ arguments: str
+ """JSON string containing the MCP call arguments"""
+
+ name: str
+ """Name of the MCP method being called"""
+
+ server_label: str
+ """Label identifying the MCP server handling the call"""
+
+ type: Literal["mcp_call"]
+ """Tool call type identifier, always "mcp_call" """
+
+ error: Optional[str] = None
+ """(Optional) Error message if the MCP call failed"""
+
+ output: Optional[str] = None
+ """(Optional) Output result from the successful MCP call"""
+
+
+class DataOutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
+ input_schema: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """JSON schema defining the tool's input parameters"""
+
+ name: str
+ """Name of the tool"""
+
+ description: Optional[str] = None
+ """(Optional) Description of what the tool does"""
+
+
+class DataOutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
+ id: str
+ """Unique identifier for this MCP list tools operation"""
+
+ server_label: str
+ """Label identifying the MCP server providing the tools"""
+
+ tools: List[DataOutputOpenAIResponseOutputMessageMcpListToolsTool]
+ """List of available tools provided by the MCP server"""
+
+ type: Literal["mcp_list_tools"]
+ """Tool call type identifier, always "mcp_list_tools" """
+
+
+class DataOutputOpenAIResponseMcpApprovalRequest(BaseModel):
+    id: str
+    """Unique identifier for this MCP approval request"""
+
+    arguments: str
+    """JSON string containing the arguments for the call awaiting approval"""
+
+    name: str
+    """Name of the MCP method requiring approval"""
+
+    server_label: str
+    """Label identifying the MCP server that issued the request"""
+
+    type: Literal["mcp_approval_request"]
+    """Object type identifier, always "mcp_approval_request" """
+
+
+DataOutput: TypeAlias = Annotated[
+ Union[
+ DataOutputOpenAIResponseMessage,
+ DataOutputOpenAIResponseOutputMessageWebSearchToolCall,
+ DataOutputOpenAIResponseOutputMessageFileSearchToolCall,
+ DataOutputOpenAIResponseOutputMessageFunctionToolCall,
+ DataOutputOpenAIResponseOutputMessageMcpCall,
+ DataOutputOpenAIResponseOutputMessageMcpListTools,
+ DataOutputOpenAIResponseMcpApprovalRequest,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataTextFormat(BaseModel):
+ type: Literal["text", "json_schema", "json_object"]
+ """Must be "text", "json_schema", or "json_object" to identify the format type"""
+
+ description: Optional[str] = None
+ """(Optional) A description of the response format. Only used for json_schema."""
+
+ name: Optional[str] = None
+ """The name of the response format. Only used for json_schema."""
+
+ schema_: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = FieldInfo(
+ alias="schema", default=None
+ )
+ """The JSON schema the response should conform to.
+
+ In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
+ """
+
+ strict: Optional[bool] = None
+ """(Optional) Whether to strictly enforce the JSON schema.
+
+ If true, the response must match the schema exactly. Only used for json_schema.
+ """
+
+
+class DataText(BaseModel):
+ format: Optional[DataTextFormat] = None
+ """(Optional) Text format configuration specifying output format requirements"""
+
+
+class DataError(BaseModel):
+ code: str
+ """Error code identifying the type of failure"""
+
+ message: str
+ """Human-readable error message describing the failure"""
+
+
+class Data(BaseModel):
+ id: str
+ """Unique identifier for this response"""
+
+ created_at: int
+ """Unix timestamp when the response was created"""
+
+ input: List[DataInput]
+ """List of input items that led to this response"""
+
+ model: str
+ """Model identifier used for generation"""
+
+ object: Literal["response"]
+ """Object type identifier, always "response" """
+
+ output: List[DataOutput]
+ """List of generated output items (messages, tool calls, etc.)"""
+
+ parallel_tool_calls: bool
+ """Whether tool calls can be executed in parallel"""
+
+ status: str
+ """Current status of the response generation"""
+
+ text: DataText
+ """Text formatting configuration for the response"""
+
+ error: Optional[DataError] = None
+ """(Optional) Error details if the response generation failed"""
+
+ previous_response_id: Optional[str] = None
+ """(Optional) ID of the previous response in a conversation"""
+
+ temperature: Optional[float] = None
+ """(Optional) Sampling temperature used for generation"""
+
+ top_p: Optional[float] = None
+ """(Optional) Nucleus sampling parameter used for generation"""
+
+ truncation: Optional[str] = None
+ """(Optional) Truncation strategy applied to the response"""
+
+
+class ResponseCreateResponse(BaseModel):
+ data: List[Data]
+ """List of response objects with their input context"""
+
+ first_id: str
+ """Identifier of the first item in this page"""
+
+ has_more: bool
+ """Whether there are more results available beyond this page"""
+
+ last_id: str
+ """Identifier of the last item in this page"""
+
+ object: Literal["list"]
+ """Object type identifier, always "list" """
diff --git a/src/llama_stack_client/types/span_with_status.py b/src/llama_stack_client/types/span_with_status.py
deleted file mode 100644
index 04d124bd..00000000
--- a/src/llama_stack_client/types/span_with_status.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["SpanWithStatus"]
-
-
-class SpanWithStatus(BaseModel):
- name: str
- """Human-readable name describing the operation this span represents"""
-
- span_id: str
- """Unique identifier for the span"""
-
- start_time: datetime
- """Timestamp when the operation began"""
-
- trace_id: str
- """Unique identifier for the trace this span belongs to"""
-
- attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Key-value pairs containing additional metadata about the span"""
-
- end_time: Optional[datetime] = None
- """(Optional) Timestamp when the operation finished, if completed"""
-
- parent_span_id: Optional[str] = None
- """(Optional) Unique identifier for the parent span, if this is a child span"""
-
- status: Optional[Literal["ok", "error"]] = None
- """(Optional) The current status of the span"""
diff --git a/src/llama_stack_client/types/telemetry_get_span_response.py b/src/llama_stack_client/types/telemetry_get_span_response.py
deleted file mode 100644
index 6826d4d0..00000000
--- a/src/llama_stack_client/types/telemetry_get_span_response.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-
-from .._models import BaseModel
-
-__all__ = ["TelemetryGetSpanResponse"]
-
-
-class TelemetryGetSpanResponse(BaseModel):
- name: str
- """Human-readable name describing the operation this span represents"""
-
- span_id: str
- """Unique identifier for the span"""
-
- start_time: datetime
- """Timestamp when the operation began"""
-
- trace_id: str
- """Unique identifier for the trace this span belongs to"""
-
- attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Key-value pairs containing additional metadata about the span"""
-
- end_time: Optional[datetime] = None
- """(Optional) Timestamp when the operation finished, if completed"""
-
- parent_span_id: Optional[str] = None
- """(Optional) Unique identifier for the parent span, if this is a child span"""
diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_params.py b/src/llama_stack_client/types/telemetry_get_span_tree_params.py
deleted file mode 100644
index 92dc7e1d..00000000
--- a/src/llama_stack_client/types/telemetry_get_span_tree_params.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = ["TelemetryGetSpanTreeParams"]
-
-
-class TelemetryGetSpanTreeParams(TypedDict, total=False):
- attributes_to_return: SequenceNotStr[str]
- """The attributes to return in the tree."""
-
- max_depth: int
- """The maximum depth of the tree."""
diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_response.py b/src/llama_stack_client/types/telemetry_get_span_tree_response.py
deleted file mode 100644
index b72e6158..00000000
--- a/src/llama_stack_client/types/telemetry_get_span_tree_response.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict
-from typing_extensions import TypeAlias
-
-from .span_with_status import SpanWithStatus
-
-__all__ = ["TelemetryGetSpanTreeResponse"]
-
-TelemetryGetSpanTreeResponse: TypeAlias = Dict[str, SpanWithStatus]
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_params.py b/src/llama_stack_client/types/telemetry_query_metrics_params.py
deleted file mode 100644
index adf3f720..00000000
--- a/src/llama_stack_client/types/telemetry_query_metrics_params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["TelemetryQueryMetricsParams", "LabelMatcher"]
-
-
-class TelemetryQueryMetricsParams(TypedDict, total=False):
- query_type: Required[Literal["range", "instant"]]
- """The type of query to perform."""
-
- start_time: Required[int]
- """The start time of the metric to query."""
-
- end_time: int
- """The end time of the metric to query."""
-
- granularity: str
- """The granularity of the metric to query."""
-
- label_matchers: Iterable[LabelMatcher]
- """The label matchers to apply to the metric."""
-
-
-class LabelMatcher(TypedDict, total=False):
- name: Required[str]
- """The name of the label to match"""
-
- operator: Required[Literal["=", "!=", "=~", "!~"]]
- """The comparison operator to use for matching"""
-
- value: Required[str]
- """The value to match against"""
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_response.py b/src/llama_stack_client/types/telemetry_query_metrics_response.py
deleted file mode 100644
index e9f4264e..00000000
--- a/src/llama_stack_client/types/telemetry_query_metrics_response.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
- "TelemetryQueryMetricsResponse",
- "TelemetryQueryMetricsResponseItem",
- "TelemetryQueryMetricsResponseItemLabel",
- "TelemetryQueryMetricsResponseItemValue",
-]
-
-
-class TelemetryQueryMetricsResponseItemLabel(BaseModel):
- name: str
- """The name of the label"""
-
- value: str
- """The value of the label"""
-
-
-class TelemetryQueryMetricsResponseItemValue(BaseModel):
- timestamp: int
- """Unix timestamp when the metric value was recorded"""
-
- unit: str
-
- value: float
- """The numeric value of the metric at this timestamp"""
-
-
-class TelemetryQueryMetricsResponseItem(BaseModel):
- labels: List[TelemetryQueryMetricsResponseItemLabel]
- """List of labels associated with this metric series"""
-
- metric: str
- """The name of the metric"""
-
- values: List[TelemetryQueryMetricsResponseItemValue]
- """List of data points in chronological order"""
-
-
-TelemetryQueryMetricsResponse: TypeAlias = List[TelemetryQueryMetricsResponseItem]
diff --git a/src/llama_stack_client/types/telemetry_query_spans_params.py b/src/llama_stack_client/types/telemetry_query_spans_params.py
deleted file mode 100644
index 452439e3..00000000
--- a/src/llama_stack_client/types/telemetry_query_spans_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Required, TypedDict
-
-from .._types import SequenceNotStr
-from .query_condition_param import QueryConditionParam
-
-__all__ = ["TelemetryQuerySpansParams"]
-
-
-class TelemetryQuerySpansParams(TypedDict, total=False):
- attribute_filters: Required[Iterable[QueryConditionParam]]
- """The attribute filters to apply to the spans."""
-
- attributes_to_return: Required[SequenceNotStr[str]]
- """The attributes to return in the spans."""
-
- max_depth: int
- """The maximum depth of the tree."""
diff --git a/src/llama_stack_client/types/telemetry_query_spans_response.py b/src/llama_stack_client/types/telemetry_query_spans_response.py
deleted file mode 100644
index 49eaeb38..00000000
--- a/src/llama_stack_client/types/telemetry_query_spans_response.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-from typing_extensions import TypeAlias
-
-from .._models import BaseModel
-
-__all__ = ["TelemetryQuerySpansResponse", "TelemetryQuerySpansResponseItem"]
-
-
-class TelemetryQuerySpansResponseItem(BaseModel):
- name: str
- """Human-readable name describing the operation this span represents"""
-
- span_id: str
- """Unique identifier for the span"""
-
- start_time: datetime
- """Timestamp when the operation began"""
-
- trace_id: str
- """Unique identifier for the trace this span belongs to"""
-
- attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Key-value pairs containing additional metadata about the span"""
-
- end_time: Optional[datetime] = None
- """(Optional) Timestamp when the operation finished, if completed"""
-
- parent_span_id: Optional[str] = None
- """(Optional) Unique identifier for the parent span, if this is a child span"""
-
-
-TelemetryQuerySpansResponse: TypeAlias = List[TelemetryQuerySpansResponseItem]
diff --git a/src/llama_stack_client/types/telemetry_query_traces_params.py b/src/llama_stack_client/types/telemetry_query_traces_params.py
deleted file mode 100644
index 2a6eb334..00000000
--- a/src/llama_stack_client/types/telemetry_query_traces_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import TypedDict
-
-from .._types import SequenceNotStr
-from .query_condition_param import QueryConditionParam
-
-__all__ = ["TelemetryQueryTracesParams"]
-
-
-class TelemetryQueryTracesParams(TypedDict, total=False):
- attribute_filters: Iterable[QueryConditionParam]
- """The attribute filters to apply to the traces."""
-
- limit: int
- """The limit of traces to return."""
-
- offset: int
- """The offset of the traces to return."""
-
- order_by: SequenceNotStr[str]
- """The order by of the traces to return."""
diff --git a/src/llama_stack_client/types/telemetry_query_traces_response.py b/src/llama_stack_client/types/telemetry_query_traces_response.py
deleted file mode 100644
index 01a1365d..00000000
--- a/src/llama_stack_client/types/telemetry_query_traces_response.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .trace import Trace
-
-__all__ = ["TelemetryQueryTracesResponse"]
-
-TelemetryQueryTracesResponse: TypeAlias = List[Trace]
diff --git a/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py b/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
deleted file mode 100644
index f0bdebbd..00000000
--- a/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable
-from typing_extensions import Required, TypedDict
-
-from .._types import SequenceNotStr
-from .query_condition_param import QueryConditionParam
-
-__all__ = ["TelemetrySaveSpansToDatasetParams"]
-
-
-class TelemetrySaveSpansToDatasetParams(TypedDict, total=False):
- attribute_filters: Required[Iterable[QueryConditionParam]]
- """The attribute filters to apply to the spans."""
-
- attributes_to_save: Required[SequenceNotStr[str]]
- """The attributes to save to the dataset."""
-
- dataset_id: Required[str]
- """The ID of the dataset to save the spans to."""
-
- max_depth: int
- """The maximum depth of the tree."""
diff --git a/src/llama_stack_client/types/tool.py b/src/llama_stack_client/types/tool.py
index ae77cf69..a7243b64 100644
--- a/src/llama_stack_client/types/tool.py
+++ b/src/llama_stack_client/types/tool.py
@@ -5,7 +5,30 @@
from .._models import BaseModel
-__all__ = ["Tool"]
+__all__ = ["Tool", "Parameter"]
+
+
+class Parameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+ items: Optional[object] = None
+ """Type of the elements when parameter_type is array"""
+
+ title: Optional[str] = None
+ """(Optional) Title of the parameter"""
class Tool(BaseModel):
@@ -14,6 +37,9 @@ class Tool(BaseModel):
identifier: str
+ parameters: List[Parameter]
+ """List of parameters this tool accepts"""
+
provider_id: str
toolgroup_id: str
@@ -22,13 +48,7 @@ class Tool(BaseModel):
type: Literal["tool"]
"""Type of resource, always 'tool'"""
- input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """JSON Schema for the tool's input parameters"""
-
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
- output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """JSON Schema for the tool's output"""
-
provider_resource_id: Optional[str] = None
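
A quick sketch of the new response-side `Parameter` model in isolation; the field values below are illustrative, but the field names, types, and defaults come from the class added above.

    from llama_stack_client.types.tool import Parameter

    param = Parameter(
        name="city",
        description="City to look up",
        parameter_type="string",
        required=True,
    )
    # Optional fields default to None when omitted.
    assert param.default is None and param.items is None and param.title is None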
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
index e549c4e9..21949b41 100644
--- a/src/llama_stack_client/types/tool_def.py
+++ b/src/llama_stack_client/types/tool_def.py
@@ -4,7 +4,30 @@
from .._models import BaseModel
-__all__ = ["ToolDef"]
+__all__ = ["ToolDef", "Parameter"]
+
+
+class Parameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+ items: Optional[object] = None
+ """Type of the elements when parameter_type is array"""
+
+ title: Optional[str] = None
+ """(Optional) Title of the parameter"""
class ToolDef(BaseModel):
@@ -14,11 +37,8 @@ class ToolDef(BaseModel):
description: Optional[str] = None
"""(Optional) Human-readable description of what the tool does"""
- input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
-
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
- output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+ parameters: Optional[List[Parameter]] = None
+ """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
index 9d5c71a7..a50437b2 100644
--- a/src/llama_stack_client/types/tool_def_param.py
+++ b/src/llama_stack_client/types/tool_def_param.py
@@ -5,7 +5,30 @@
from typing import Dict, Union, Iterable
from typing_extensions import Required, TypedDict
-__all__ = ["ToolDefParam"]
+__all__ = ["ToolDefParam", "Parameter"]
+
+
+class Parameter(TypedDict, total=False):
+ description: Required[str]
+ """Human-readable description of what the parameter does"""
+
+ name: Required[str]
+ """Name of the parameter"""
+
+ parameter_type: Required[str]
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: Required[bool]
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, Iterable[object], object, None]
+ """(Optional) Default value for the parameter if not provided"""
+
+ items: object
+ """Type of the elements when parameter_type is array"""
+
+ title: str
+ """(Optional) Title of the parameter"""
class ToolDefParam(TypedDict, total=False):
@@ -15,11 +38,8 @@ class ToolDefParam(TypedDict, total=False):
description: str
"""(Optional) Human-readable description of what the tool does"""
- input_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
-
metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
"""(Optional) Additional metadata about the tool"""
- output_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+ parameters: Iterable[Parameter]
+ """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/trace.py b/src/llama_stack_client/types/trace.py
deleted file mode 100644
index 0657d616..00000000
--- a/src/llama_stack_client/types/trace.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from datetime import datetime
-
-from .._models import BaseModel
-
-__all__ = ["Trace"]
-
-
-class Trace(BaseModel):
- root_span_id: str
- """Unique identifier for the root span that started this trace"""
-
- start_time: datetime
- """Timestamp when the trace began"""
-
- trace_id: str
- """Unique identifier for the trace"""
-
- end_time: Optional[datetime] = None
- """(Optional) Timestamp when the trace finished, if completed"""
diff --git a/tests/api_resources/alpha/test_agents.py b/tests/api_resources/alpha/test_agents.py
index e6292f65..d67e8457 100644
--- a/tests/api_resources/alpha/test_agents.py
+++ b/tests/api_resources/alpha/test_agents.py
@@ -41,9 +41,18 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
{
"name": "name",
"description": "description",
- "input_schema": {"foo": True},
"metadata": {"foo": True},
- "output_schema": {"foo": True},
+ "parameters": [
+ {
+ "description": "description",
+ "name": "name",
+ "parameter_type": "parameter_type",
+ "required": True,
+ "default": True,
+ "items": {},
+ "title": "title",
+ }
+ ],
}
],
"enable_session_persistence": True,
@@ -238,9 +247,18 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
{
"name": "name",
"description": "description",
- "input_schema": {"foo": True},
"metadata": {"foo": True},
- "output_schema": {"foo": True},
+ "parameters": [
+ {
+ "description": "description",
+ "name": "name",
+ "parameter_type": "parameter_type",
+ "required": True,
+ "default": True,
+ "items": {},
+ "title": "title",
+ }
+ ],
}
],
"enable_session_persistence": True,
diff --git a/tests/api_resources/test_benchmarks.py b/tests/api_resources/test_benchmarks.py
deleted file mode 100644
index 97d3d5c9..00000000
--- a/tests/api_resources/test_benchmarks.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Benchmark, BenchmarkListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestBenchmarks:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- benchmark = client.benchmarks.retrieve(
- "benchmark_id",
- )
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.benchmarks.with_raw_response.retrieve(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.benchmarks.with_streaming_response.retrieve(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.benchmarks.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- benchmark = client.benchmarks.list()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.benchmarks.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.benchmarks.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- benchmark = client.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
- assert benchmark is None
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- benchmark = client.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- metadata={"foo": True},
- provider_benchmark_id="provider_benchmark_id",
- provider_id="provider_id",
- )
- assert benchmark is None
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- response = client.benchmarks.with_raw_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert benchmark is None
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with client.benchmarks.with_streaming_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncBenchmarks:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.benchmarks.retrieve(
- "benchmark_id",
- )
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.benchmarks.with_raw_response.retrieve(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.benchmarks.with_streaming_response.retrieve(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.benchmarks.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.benchmarks.list()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.benchmarks.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.benchmarks.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
- assert benchmark is None
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- metadata={"foo": True},
- provider_benchmark_id="provider_benchmark_id",
- provider_id="provider_id",
- )
- assert benchmark is None
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.benchmarks.with_raw_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert benchmark is None
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.benchmarks.with_streaming_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py
deleted file mode 100644
index eee1de8c..00000000
--- a/tests/api_resources/test_datasets.py
+++ /dev/null
@@ -1,521 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
- DatasetListResponse,
- DatasetIterrowsResponse,
- DatasetRegisterResponse,
- DatasetRetrieveResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestDatasets:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.retrieve(
- "dataset_id",
- )
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.retrieve(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.retrieve(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.datasets.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.list()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_appendrows(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- )
- assert dataset is None
-
- @parametrize
- def test_raw_response_appendrows(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert dataset is None
-
- @parametrize
- def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_appendrows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.datasets.with_raw_response.appendrows(
- dataset_id="",
- rows=[{"foo": True}],
- )
-
- @parametrize
- def test_method_iterrows(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.iterrows(
- dataset_id="dataset_id",
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_method_iterrows_with_all_params(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.iterrows(
- dataset_id="dataset_id",
- limit=0,
- start_index=0,
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_iterrows(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.iterrows(
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_iterrows(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.iterrows(
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_iterrows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.datasets.with_raw_response.iterrows(
- dataset_id="",
- )
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- )
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- dataset_id="dataset_id",
- metadata={"foo": True},
- )
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_unregister(self, client: LlamaStackClient) -> None:
- dataset = client.datasets.unregister(
- "dataset_id",
- )
- assert dataset is None
-
- @parametrize
- def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
- response = client.datasets.with_raw_response.unregister(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert dataset is None
-
- @parametrize
- def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
- with client.datasets.with_streaming_response.unregister(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_unregister(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.datasets.with_raw_response.unregister(
- "",
- )
-
-
-class TestAsyncDatasets:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.retrieve(
- "dataset_id",
- )
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.retrieve(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.retrieve(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.datasets.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.list()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- )
- assert dataset is None
-
- @parametrize
- async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert dataset is None
-
- @parametrize
- async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": True}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.datasets.with_raw_response.appendrows(
- dataset_id="",
- rows=[{"foo": True}],
- )
-
- @parametrize
- async def test_method_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.iterrows(
- dataset_id="dataset_id",
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_method_iterrows_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.iterrows(
- dataset_id="dataset_id",
- limit=0,
- start_index=0,
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.iterrows(
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.iterrows(
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.datasets.with_raw_response.iterrows(
- dataset_id="",
- )
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- )
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- dataset_id="dataset_id",
- metadata={"foo": True},
- )
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.register(
- purpose="post-training/messages",
- source={
- "type": "uri",
- "uri": "uri",
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.datasets.unregister(
- "dataset_id",
- )
- assert dataset is None
-
- @parametrize
- async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.datasets.with_raw_response.unregister(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert dataset is None
-
- @parametrize
- async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.datasets.with_streaming_response.unregister(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.datasets.with_raw_response.unregister(
- "",
- )
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index ad2ab3be..ebea8b70 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -12,6 +12,7 @@
from llama_stack_client.types import (
ResponseObject,
ResponseListResponse,
+ ResponseCreateResponse,
ResponseDeleteResponse,
)
from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
@@ -24,133 +25,73 @@ class TestResponses:
@parametrize
def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
- response = client.responses.create(
- input="string",
- model="model",
- )
- assert_matches_type(ResponseObject, response, path=["response"])
+ response = client.responses.create()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
response = client.responses.create(
- input="string",
+ after="after",
+ limit=0,
model="model",
- include=["string"],
- instructions="instructions",
- max_infer_iters=0,
- previous_response_id="previous_response_id",
- store=True,
- stream=False,
- temperature=0,
- text={
- "format": {
- "type": "text",
- "description": "description",
- "name": "name",
- "schema": {"foo": True},
- "strict": True,
- }
- },
- tools=[
- {
- "type": "web_search",
- "search_context_size": "search_context_size",
- }
- ],
+ order="asc",
)
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
- http_response = client.responses.with_raw_response.create(
- input="string",
- model="model",
- )
+ http_response = client.responses.with_raw_response.create()
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None:
- with client.responses.with_streaming_response.create(
- input="string",
- model="model",
- ) as http_response:
+ with client.responses.with_streaming_response.create() as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
@parametrize
def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
- response_stream = client.responses.create(
- input="string",
- model="model",
- stream=True,
- )
- response_stream.response.close()
+ response = client.responses.create()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None:
- response_stream = client.responses.create(
- input="string",
+ response = client.responses.create(
+ after="after",
+ limit=0,
model="model",
- stream=True,
- include=["string"],
- instructions="instructions",
- max_infer_iters=0,
- previous_response_id="previous_response_id",
- store=True,
- temperature=0,
- text={
- "format": {
- "type": "text",
- "description": "description",
- "name": "name",
- "schema": {"foo": True},
- "strict": True,
- }
- },
- tools=[
- {
- "type": "web_search",
- "search_context_size": "search_context_size",
- }
- ],
+ order="asc",
)
- response_stream.response.close()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
- response = client.responses.with_raw_response.create(
- input="string",
- model="model",
- stream=True,
- )
+ http_response = client.responses.with_raw_response.create()
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None:
- with client.responses.with_streaming_response.create(
- input="string",
- model="model",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with client.responses.with_streaming_response.create() as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ response = http_response.parse()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
- assert cast(Any, response.is_closed) is True
+ assert cast(Any, http_response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: LlamaStackClient) -> None:
@@ -271,133 +212,73 @@ class TestAsyncResponses:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.responses.create(
- input="string",
- model="model",
- )
- assert_matches_type(ResponseObject, response, path=["response"])
+ response = await async_client.responses.create()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
response = await async_client.responses.create(
- input="string",
+ after="after",
+ limit=0,
model="model",
- include=["string"],
- instructions="instructions",
- max_infer_iters=0,
- previous_response_id="previous_response_id",
- store=True,
- stream=False,
- temperature=0,
- text={
- "format": {
- "type": "text",
- "description": "description",
- "name": "name",
- "schema": {"foo": True},
- "strict": True,
- }
- },
- tools=[
- {
- "type": "web_search",
- "search_context_size": "search_context_size",
- }
- ],
+ order="asc",
)
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- http_response = await async_client.responses.with_raw_response.create(
- input="string",
- model="model",
- )
+ http_response = await async_client.responses.with_raw_response.create()
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.responses.with_streaming_response.create(
- input="string",
- model="model",
- ) as http_response:
+ async with async_client.responses.with_streaming_response.create() as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(ResponseObject, response, path=["response"])
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- response_stream = await async_client.responses.create(
- input="string",
- model="model",
- stream=True,
- )
- await response_stream.response.aclose()
+ response = await async_client.responses.create()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- response_stream = await async_client.responses.create(
- input="string",
+ response = await async_client.responses.create(
+ after="after",
+ limit=0,
model="model",
- stream=True,
- include=["string"],
- instructions="instructions",
- max_infer_iters=0,
- previous_response_id="previous_response_id",
- store=True,
- temperature=0,
- text={
- "format": {
- "type": "text",
- "description": "description",
- "name": "name",
- "schema": {"foo": True},
- "strict": True,
- }
- },
- tools=[
- {
- "type": "web_search",
- "search_context_size": "search_context_size",
- }
- ],
+ order="asc",
)
- await response_stream.response.aclose()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.responses.with_raw_response.create(
- input="string",
- model="model",
- stream=True,
- )
+ http_response = await async_client.responses.with_raw_response.create()
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = await response.parse()
- await stream.close()
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = await http_response.parse()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.responses.with_streaming_response.create(
- input="string",
- model="model",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ async with async_client.responses.with_streaming_response.create() as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = await response.parse()
- await stream.close()
+ response = await http_response.parse()
+ assert_matches_type(ResponseCreateResponse, response, path=["response"])
- assert cast(Any, response.is_closed) is True
+ assert cast(Any, http_response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
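Taken together, these regenerated tests pin down the three access patterns the client exposes for `responses.create`. A minimal sketch of the same patterns outside the test harness, assuming a Llama Stack server at a placeholder URL:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://127.0.0.1:4010")  # placeholder URL

# Plain call: returns the parsed ResponseCreateResponse directly.
response = client.responses.create()

# Raw wrapper: exposes the HTTP layer; .parse() yields the same typed object.
http_response = client.responses.with_raw_response.create()
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
parsed = http_response.parse()

# Streaming wrapper: a context manager that holds the connection open until
# the block exits, matching the is_closed assertions in the tests above.
with client.responses.with_streaming_response.create() as http_response:
    parsed = http_response.parse()
```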
diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py
index ea123787..6191757b 100644
--- a/tests/api_resources/test_telemetry.py
+++ b/tests/api_resources/test_telemetry.py
@@ -7,16 +7,7 @@
import pytest
-from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
- Trace,
- TelemetryGetSpanResponse,
- TelemetryQuerySpansResponse,
- TelemetryGetSpanTreeResponse,
- TelemetryQueryTracesResponse,
- TelemetryQueryMetricsResponse,
-)
from llama_stack_client._utils import parse_datetime
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -25,139 +16,6 @@
class TestTelemetry:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
- @parametrize
- def test_method_get_span(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.get_span(
- span_id="span_id",
- trace_id="trace_id",
- )
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- @parametrize
- def test_raw_response_get_span(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.get_span(
- span_id="span_id",
- trace_id="trace_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- @parametrize
- def test_streaming_response_get_span(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.get_span(
- span_id="span_id",
- trace_id="trace_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_get_span(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
- client.telemetry.with_raw_response.get_span(
- span_id="span_id",
- trace_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
- client.telemetry.with_raw_response.get_span(
- span_id="",
- trace_id="trace_id",
- )
-
- @parametrize
- def test_method_get_span_tree(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.get_span_tree(
- span_id="span_id",
- )
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- def test_method_get_span_tree_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.get_span_tree(
- span_id="span_id",
- attributes_to_return=["string"],
- max_depth=0,
- )
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- def test_raw_response_get_span_tree(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.get_span_tree(
- span_id="span_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- def test_streaming_response_get_span_tree(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.get_span_tree(
- span_id="span_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_get_span_tree(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
- client.telemetry.with_raw_response.get_span_tree(
- span_id="",
- )
-
- @parametrize
- def test_method_get_trace(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.get_trace(
- "trace_id",
- )
- assert_matches_type(Trace, telemetry, path=["response"])
-
- @parametrize
- def test_raw_response_get_trace(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.get_trace(
- "trace_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(Trace, telemetry, path=["response"])
-
- @parametrize
- def test_streaming_response_get_trace(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.get_trace(
- "trace_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(Trace, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_get_trace(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
- client.telemetry.with_raw_response.get_trace(
- "",
- )
-
@parametrize
def test_method_log_event(self, client: LlamaStackClient) -> None:
telemetry = client.telemetry.log_event(
@@ -229,401 +87,12 @@ def test_streaming_response_log_event(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_metrics(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- )
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_metrics_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- end_time=0,
- granularity="granularity",
- label_matchers=[
- {
- "name": "name",
- "operator": "=",
- "value": "value",
- }
- ],
- )
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_raw_response_query_metrics(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_streaming_response_query_metrics(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_path_params_query_metrics(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
- client.telemetry.with_raw_response.query_metrics(
- metric_name="",
- query_type="range",
- start_time=0,
- )
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_spans(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- )
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_spans_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- max_depth=0,
- )
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_raw_response_query_spans(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_streaming_response_query_spans(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_traces(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_traces()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_method_query_traces_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.query_traces(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- limit=0,
- offset=0,
- order_by=["string"],
- )
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_raw_response_query_traces(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.query_traces()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- def test_streaming_response_query_traces(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.query_traces() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- )
- assert telemetry is None
-
- @parametrize
- def test_method_save_spans_to_dataset_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- max_depth=0,
- )
- assert telemetry is None
-
- @parametrize
- def test_raw_response_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = response.parse()
- assert telemetry is None
-
- @parametrize
- def test_streaming_response_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = response.parse()
- assert telemetry is None
-
- assert cast(Any, response.is_closed) is True
-
class TestAsyncTelemetry:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
- @parametrize
- async def test_method_get_span(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.get_span(
- span_id="span_id",
- trace_id="trace_id",
- )
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- @parametrize
- async def test_raw_response_get_span(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.get_span(
- span_id="span_id",
- trace_id="trace_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- @parametrize
- async def test_streaming_response_get_span(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.get_span(
- span_id="span_id",
- trace_id="trace_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_get_span(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
- await async_client.telemetry.with_raw_response.get_span(
- span_id="span_id",
- trace_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
- await async_client.telemetry.with_raw_response.get_span(
- span_id="",
- trace_id="trace_id",
- )
-
- @parametrize
- async def test_method_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.get_span_tree(
- span_id="span_id",
- )
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- async def test_method_get_span_tree_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.get_span_tree(
- span_id="span_id",
- attributes_to_return=["string"],
- max_depth=0,
- )
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- async def test_raw_response_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.get_span_tree(
- span_id="span_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- @parametrize
- async def test_streaming_response_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.get_span_tree(
- span_id="span_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
- await async_client.telemetry.with_raw_response.get_span_tree(
- span_id="",
- )
-
- @parametrize
- async def test_method_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.get_trace(
- "trace_id",
- )
- assert_matches_type(Trace, telemetry, path=["response"])
-
- @parametrize
- async def test_raw_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.get_trace(
- "trace_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(Trace, telemetry, path=["response"])
-
- @parametrize
- async def test_streaming_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.get_trace(
- "trace_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(Trace, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
- await async_client.telemetry.with_raw_response.get_trace(
- "",
- )
-
@parametrize
async def test_method_log_event(self, async_client: AsyncLlamaStackClient) -> None:
telemetry = await async_client.telemetry.log_event(
@@ -694,259 +163,3 @@ async def test_streaming_response_log_event(self, async_client: AsyncLlamaStackC
assert telemetry is None
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- )
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_metrics_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- end_time=0,
- granularity="granularity",
- label_matchers=[
- {
- "name": "name",
- "operator": "=",
- "value": "value",
- }
- ],
- )
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_raw_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_streaming_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.query_metrics(
- metric_name="metric_name",
- query_type="range",
- start_time=0,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_path_params_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
- await async_client.telemetry.with_raw_response.query_metrics(
- metric_name="",
- query_type="range",
- start_time=0,
- )
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- )
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_spans_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- max_depth=0,
- )
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_raw_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_streaming_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.query_spans(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_return=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_traces()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_method_query_traces_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.query_traces(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- limit=0,
- offset=0,
- order_by=["string"],
- )
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_raw_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.query_traces()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- @pytest.mark.skip(reason="unsupported query params in java / kotlin")
- @parametrize
- async def test_streaming_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.query_traces() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- )
- assert telemetry is None
-
- @parametrize
- async def test_method_save_spans_to_dataset_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- max_depth=0,
- )
- assert telemetry is None
-
- @parametrize
- async def test_raw_response_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- telemetry = await response.parse()
- assert telemetry is None
-
- @parametrize
- async def test_streaming_response_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.save_spans_to_dataset(
- attribute_filters=[
- {
- "key": "key",
- "op": "eq",
- "value": True,
- }
- ],
- attributes_to_save=["string"],
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- telemetry = await response.parse()
- assert telemetry is None
-
- assert cast(Any, response.is_closed) is True
From 67b3d02467b6cb12d606d0626bbc2b52ae767885 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 2 Oct 2025 09:51:00 -0700
Subject: [PATCH 3/9] fix(manual): update lib/ code for the input_schema
changes
---
.../lib/agents/client_tool.py | 81 +++++++++++++------
.../lib/agents/react/agent.py | 4 +-
2 files changed, 60 insertions(+), 25 deletions(-)
diff --git a/src/llama_stack_client/lib/agents/client_tool.py b/src/llama_stack_client/lib/agents/client_tool.py
index f017d651..f332aa13 100644
--- a/src/llama_stack_client/lib/agents/client_tool.py
+++ b/src/llama_stack_client/lib/agents/client_tool.py
@@ -19,9 +19,17 @@
Union,
)
+from typing_extensions import TypedDict
+
from llama_stack_client.types import CompletionMessage, Message
from llama_stack_client.types.alpha import ToolResponse
-from llama_stack_client.types.tool_def_param import Parameter, ToolDefParam
+from llama_stack_client.types.tool_def_param import ToolDefParam
+
+
+class JSONSchema(TypedDict, total=False):
+ type: str
+ properties: Dict[str, Any]
+ required: List[str]
class ClientTool:
@@ -47,28 +55,18 @@ def get_description(self) -> str:
raise NotImplementedError
@abstractmethod
- def get_params_definition(self) -> Dict[str, Parameter]:
+ def get_input_schema(self) -> JSONSchema:
raise NotImplementedError
def get_instruction_string(self) -> str:
return f"Use the function '{self.get_name()}' to: {self.get_description()}"
- def parameters_for_system_prompt(self) -> str:
- return json.dumps(
- {
- "name": self.get_name(),
- "description": self.get_description(),
- "parameters": {name: definition for name, definition in self.get_params_definition().items()},
- }
- )
-
def get_tool_definition(self) -> ToolDefParam:
return ToolDefParam(
name=self.get_name(),
description=self.get_description(),
- parameters=list(self.get_params_definition().values()),
+ input_schema=self.get_input_schema(),
metadata={},
- tool_prompt_format="python_list",
)
def run(
@@ -148,6 +146,37 @@ def async_run_impl(self, **kwargs):
T = TypeVar("T", bound=Callable)
+def _python_type_to_json_schema_type(type_hint: Any) -> str:
+ """Convert Python type hints to JSON Schema type strings."""
+ # Handle Union types (e.g., Optional[str])
+ origin = get_origin(type_hint)
+ if origin is Union:
+ # Get non-None types from Union
+ args = [arg for arg in get_args(type_hint) if arg is not type(None)]
+ if args:
+ type_hint = args[0] # Use first non-None type
+
+ # Get the actual type if it's a generic
+ if hasattr(type_hint, "__origin__"):
+ type_hint = type_hint.__origin__
+
+ # Map Python types to JSON Schema types
+ type_name = getattr(type_hint, "__name__", str(type_hint))
+
+ type_mapping = {
+ "bool": "boolean",
+ "int": "integer",
+ "float": "number",
+ "str": "string",
+ "list": "array",
+ "dict": "object",
+ "List": "array",
+ "Dict": "object",
+ }
+
+ return type_mapping.get(type_name, "string") # Default to string if unknown
+
+
def client_tool(func: T) -> ClientTool:
"""
Decorator to convert a function into a ClientTool.
@@ -188,13 +217,14 @@ def get_description(self) -> str:
f"No description found for client tool {__name__}. Please provide a RST-style docstring with description and :param tags for each parameter."
)
- def get_params_definition(self) -> Dict[str, Parameter]:
+ def get_input_schema(self) -> JSONSchema:
hints = get_type_hints(func)
# Remove return annotation if present
hints.pop("return", None)
# Get parameter descriptions from docstring
- params = {}
+ properties = {}
+ required = []
sig = inspect.signature(func)
doc = inspect.getdoc(func) or ""
@@ -212,15 +242,20 @@ def get_params_definition(self) -> Dict[str, Parameter]:
param = sig.parameters[name]
is_optional_type = get_origin(type_hint) is Union and type(None) in get_args(type_hint)
is_required = param.default == inspect.Parameter.empty and not is_optional_type
- params[name] = Parameter(
- name=name,
- description=param_doc or f"Parameter {name}",
- parameter_type=type_hint.__name__,
- default=(param.default if param.default != inspect.Parameter.empty else None),
- required=is_required,
- )
- return params
+ properties[name] = {
+ "type": _python_type_to_json_schema_type(type_hint),
+ "description": param_doc,
+ }
+
+ if is_required:
+ required.append(name)
+
+ return {
+ "type": "object",
+ "properties": properties,
+ "required": required,
+ }
def run_impl(self, **kwargs) -> Any:
if inspect.iscoroutinefunction(func):
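To make the new flow concrete: a hedged sketch of a decorated tool and the schema `get_input_schema()` above derives from its type hints and `:param` tags. `get_weather` is a hypothetical tool, not part of the SDK:

```python
from llama_stack_client.lib.agents.client_tool import client_tool

@client_tool
def get_weather(city: str, units: str = "metric") -> str:
    """Look up the current weather.

    :param city: Name of the city to query
    :param units: Unit system, e.g. metric or imperial
    """
    return f"Weather for {city} ({units})"

# Per the logic above, city is required (no default) and units is not:
# {
#     "type": "object",
#     "properties": {
#         "city": {"type": "string", "description": "Name of the city to query"},
#         "units": {"type": "string", "description": "Unit system, e.g. metric or imperial"},
#     },
#     "required": ["city"],
# }
print(get_weather.get_input_schema())
```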
diff --git a/src/llama_stack_client/lib/agents/react/agent.py b/src/llama_stack_client/lib/agents/react/agent.py
index d1ca4777..919f0420 100644
--- a/src/llama_stack_client/lib/agents/react/agent.py
+++ b/src/llama_stack_client/lib/agents/react/agent.py
@@ -37,7 +37,7 @@ def get_tool_defs(
{
"name": tool.identifier,
"description": tool.description,
- "parameters": tool.parameters,
+ "input_schema": tool.input_schema,
}
for tool in client.tools.list(toolgroup_id=toolgroup_id)
]
@@ -48,7 +48,7 @@ def get_tool_defs(
{
"name": tool.get_name(),
"description": tool.get_description(),
- "parameters": tool.get_params_definition(),
+ "input_schema": tool.get_input_schema(),
}
for tool in client_tools
]
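With both hunks applied, every tool definition the ReAct agent assembles carries a JSON Schema under `input_schema` rather than a `parameters` list. A representative entry, with illustrative values:

```python
# One entry from get_tool_defs() after this change (values hypothetical).
tool_def = {
    "name": "get_weather",
    "description": "Look up the current weather.",
    "input_schema": {
        "type": "object",
        "properties": {
            "city": {"type": "string", "description": "Name of the city to query"},
        },
        "required": ["city"],
    },
}
```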
From 4e24a76a0065b5ebea99a5792389ce9aa0fe7483 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 18:05:41 +0000
Subject: [PATCH 4/9] fix(api): fix the ToolDefParam updates
---
.stats.yml | 4 +-
api.md | 4 +-
src/llama_stack_client/resources/tools.py | 10 ++--
src/llama_stack_client/types/__init__.py | 2 -
.../types/list_tools_response.py | 11 ----
.../types/shared/tool_call.py | 14 +----
.../types/shared_params/tool_call.py | 23 +-------
src/llama_stack_client/types/tool.py | 54 -------------------
src/llama_stack_client/types/tool_def.py | 35 ++++--------
.../types/tool_def_param.py | 35 ++++--------
.../types/tool_list_response.py | 4 +-
tests/api_resources/alpha/test_agents.py | 28 +++-------
tests/api_resources/test_tools.py | 14 ++---
13 files changed, 46 insertions(+), 192 deletions(-)
delete mode 100644 src/llama_stack_client/types/list_tools_response.py
delete mode 100644 src/llama_stack_client/types/tool.py
diff --git a/.stats.yml b/.stats.yml
index cbd436bf..724604de 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 93
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-41cb5d8049e6ffd933a7ad6bbbb76b2fef2e864d0d857c91799ee16e9a796883.yml
-openapi_spec_hash: 5e0bdf64563e020ef14b968ab724d2db
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml
+openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c
config_hash: 0412cd40c0609550c1a47c69dd104e4f
diff --git a/api.md b/api.md
index cdb427e2..0403c604 100644
--- a/api.md
+++ b/api.md
@@ -42,13 +42,13 @@ Methods:
Types:
```python
-from llama_stack_client.types import ListToolsResponse, Tool, ToolListResponse
+from llama_stack_client.types import ToolListResponse
```
Methods:
- client.tools.list(\*\*params) -> ToolListResponse
-- client.tools.get(tool_name) -> Tool
+- client.tools.get(tool_name) -> ToolDef
# ToolRuntime
diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py
index 6d405bed..adbf4402 100644
--- a/src/llama_stack_client/resources/tools.py
+++ b/src/llama_stack_client/resources/tools.py
@@ -18,8 +18,8 @@
async_to_streamed_response_wrapper,
)
from .._wrappers import DataWrapper
-from ..types.tool import Tool
from .._base_client import make_request_options
+from ..types.tool_def import ToolDef
from ..types.tool_list_response import ToolListResponse
__all__ = ["ToolsResource", "AsyncToolsResource"]
@@ -93,7 +93,7 @@ def get(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Tool:
+ ) -> ToolDef:
"""
Get a tool by its name.
@@ -113,7 +113,7 @@ def get(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=Tool,
+ cast_to=ToolDef,
)
@@ -185,7 +185,7 @@ async def get(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Tool:
+ ) -> ToolDef:
"""
Get a tool by its name.
@@ -205,7 +205,7 @@ async def get(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=Tool,
+ cast_to=ToolDef,
)
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index c7053fec..6cc3787f 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -3,7 +3,6 @@
from __future__ import annotations
from .file import File as File
-from .tool import Tool as Tool
from .model import Model as Model
from .shared import (
Message as Message,
@@ -43,7 +42,6 @@
from .file_create_params import FileCreateParams as FileCreateParams
from .tool_list_response import ToolListResponse as ToolListResponse
from .list_files_response import ListFilesResponse as ListFilesResponse
-from .list_tools_response import ListToolsResponse as ListToolsResponse
from .model_list_response import ModelListResponse as ModelListResponse
from .route_list_response import RouteListResponse as RouteListResponse
from .run_shield_response import RunShieldResponse as RunShieldResponse
diff --git a/src/llama_stack_client/types/list_tools_response.py b/src/llama_stack_client/types/list_tools_response.py
deleted file mode 100644
index 47f040b5..00000000
--- a/src/llama_stack_client/types/list_tools_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .tool_list_response import ToolListResponse
-
-__all__ = ["ListToolsResponse"]
-
-
-class ListToolsResponse(BaseModel):
- data: ToolListResponse
- """List of tools"""
diff --git a/src/llama_stack_client/types/shared/tool_call.py b/src/llama_stack_client/types/shared/tool_call.py
index b9301d75..a35cd6dd 100644
--- a/src/llama_stack_client/types/shared/tool_call.py
+++ b/src/llama_stack_client/types/shared/tool_call.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
+from typing import Union
from typing_extensions import Literal
from ..._models import BaseModel
@@ -9,18 +9,8 @@
class ToolCall(BaseModel):
- arguments: Union[
- str,
- Dict[
- str,
- Union[
- str, float, bool, List[Union[str, float, bool, None]], Dict[str, Union[str, float, bool, None]], None
- ],
- ],
- ]
+ arguments: str
call_id: str
tool_name: Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]
-
- arguments_json: Optional[str] = None
diff --git a/src/llama_stack_client/types/shared_params/tool_call.py b/src/llama_stack_client/types/shared_params/tool_call.py
index 55d53099..16686e61 100644
--- a/src/llama_stack_client/types/shared_params/tool_call.py
+++ b/src/llama_stack_client/types/shared_params/tool_call.py
@@ -2,34 +2,15 @@
from __future__ import annotations
-from typing import Dict, Union
+from typing import Union
from typing_extensions import Literal, Required, TypedDict
-from ..._types import SequenceNotStr
-
__all__ = ["ToolCall"]
class ToolCall(TypedDict, total=False):
- arguments: Required[
- Union[
- str,
- Dict[
- str,
- Union[
- str,
- float,
- bool,
- SequenceNotStr[Union[str, float, bool, None]],
- Dict[str, Union[str, float, bool, None]],
- None,
- ],
- ],
- ]
- ]
+ arguments: Required[str]
call_id: Required[str]
tool_name: Required[Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]]
-
- arguments_json: str
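Since `arguments` collapses to a plain string and `arguments_json` disappears, callers now decode the payload themselves. A sketch, assuming the server serializes arguments as a JSON object string (which is what the old dict-typed field carried in structured form):

```python
import json

def decode_arguments(arguments: str) -> dict:
    """Parse a ToolCall.arguments payload into keyword arguments."""
    return json.loads(arguments)

kwargs = decode_arguments('{"city": "Paris", "units": "metric"}')
assert kwargs == {"city": "Paris", "units": "metric"}
```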
diff --git a/src/llama_stack_client/types/tool.py b/src/llama_stack_client/types/tool.py
deleted file mode 100644
index a7243b64..00000000
--- a/src/llama_stack_client/types/tool.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["Tool", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
-
-
-class Tool(BaseModel):
- description: str
- """Human-readable description of what the tool does"""
-
- identifier: str
-
- parameters: List[Parameter]
- """List of parameters this tool accepts"""
-
- provider_id: str
-
- toolgroup_id: str
- """ID of the tool group this tool belongs to"""
-
- type: Literal["tool"]
- """Type of resource, always 'tool'"""
-
- metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Additional metadata about the tool"""
-
- provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
index 21949b41..4674f832 100644
--- a/src/llama_stack_client/types/tool_def.py
+++ b/src/llama_stack_client/types/tool_def.py
@@ -4,30 +4,7 @@
from .._models import BaseModel
-__all__ = ["ToolDef", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
- items: Optional[object] = None
- """Type of the elements when parameter_type is array"""
-
- title: Optional[str] = None
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDef"]
class ToolDef(BaseModel):
@@ -37,8 +14,14 @@ class ToolDef(BaseModel):
description: Optional[str] = None
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
"""(Optional) Additional metadata about the tool"""
- parameters: Optional[List[Parameter]] = None
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+
+ toolgroup_id: Optional[str] = None
+ """(Optional) ID of the tool group this tool belongs to"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
index a50437b2..d14ef6cc 100644
--- a/src/llama_stack_client/types/tool_def_param.py
+++ b/src/llama_stack_client/types/tool_def_param.py
@@ -5,30 +5,7 @@
from typing import Dict, Union, Iterable
from typing_extensions import Required, TypedDict
-__all__ = ["ToolDefParam", "Parameter"]
-
-
-class Parameter(TypedDict, total=False):
- description: Required[str]
- """Human-readable description of what the parameter does"""
-
- name: Required[str]
- """Name of the parameter"""
-
- parameter_type: Required[str]
- """Type of the parameter (e.g., string, integer)"""
-
- required: Required[bool]
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, Iterable[object], object, None]
- """(Optional) Default value for the parameter if not provided"""
-
- items: object
- """Type of the elements when parameter_type is array"""
-
- title: str
- """(Optional) Title of the parameter"""
+__all__ = ["ToolDefParam"]
class ToolDefParam(TypedDict, total=False):
@@ -38,8 +15,14 @@ class ToolDefParam(TypedDict, total=False):
description: str
"""(Optional) Human-readable description of what the tool does"""
+ input_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool inputs (MCP inputSchema)"""
+
metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
"""(Optional) Additional metadata about the tool"""
- parameters: Iterable[Parameter]
- """(Optional) List of parameters this tool accepts"""
+ output_schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON Schema for tool outputs (MCP outputSchema)"""
+
+ toolgroup_id: str
+ """(Optional) ID of the tool group this tool belongs to"""
diff --git a/src/llama_stack_client/types/tool_list_response.py b/src/llama_stack_client/types/tool_list_response.py
index 11750ace..bb6c935f 100644
--- a/src/llama_stack_client/types/tool_list_response.py
+++ b/src/llama_stack_client/types/tool_list_response.py
@@ -3,8 +3,8 @@
from typing import List
from typing_extensions import TypeAlias
-from .tool import Tool
+from .tool_def import ToolDef
__all__ = ["ToolListResponse"]
-ToolListResponse: TypeAlias = List[Tool]
+ToolListResponse: TypeAlias = List[ToolDef]
diff --git a/tests/api_resources/alpha/test_agents.py b/tests/api_resources/alpha/test_agents.py
index d67e8457..075bd478 100644
--- a/tests/api_resources/alpha/test_agents.py
+++ b/tests/api_resources/alpha/test_agents.py
@@ -41,18 +41,10 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
+ "toolgroup_id": "toolgroup_id",
}
],
"enable_session_persistence": True,
@@ -247,18 +239,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
{
"name": "name",
"description": "description",
+ "input_schema": {"foo": True},
"metadata": {"foo": True},
- "parameters": [
- {
- "description": "description",
- "name": "name",
- "parameter_type": "parameter_type",
- "required": True,
- "default": True,
- "items": {},
- "title": "title",
- }
- ],
+ "output_schema": {"foo": True},
+ "toolgroup_id": "toolgroup_id",
}
],
"enable_session_persistence": True,
diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py
index 3c1f0da4..6fafb9f9 100644
--- a/tests/api_resources/test_tools.py
+++ b/tests/api_resources/test_tools.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Tool, ToolListResponse
+from llama_stack_client.types import ToolDef, ToolListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -54,7 +54,7 @@ def test_method_get(self, client: LlamaStackClient) -> None:
tool = client.tools.get(
"tool_name",
)
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
def test_raw_response_get(self, client: LlamaStackClient) -> None:
@@ -65,7 +65,7 @@ def test_raw_response_get(self, client: LlamaStackClient) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
def test_streaming_response_get(self, client: LlamaStackClient) -> None:
@@ -76,7 +76,7 @@ def test_streaming_response_get(self, client: LlamaStackClient) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -130,7 +130,7 @@ async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
tool = await async_client.tools.get(
"tool_name",
)
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
@@ -141,7 +141,7 @@ async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> No
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = await response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
@parametrize
async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
@@ -152,7 +152,7 @@ async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tool = await response.parse()
- assert_matches_type(Tool, tool, path=["response"])
+ assert_matches_type(ToolDef, tool, path=["response"])
assert cast(Any, response.is_closed) is True
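A usage sketch for the retyped endpoints: `tools.get` and `tools.list` now yield `ToolDef` objects whose `input_schema` is a JSON Schema dict. Server URL and tool name below are placeholders:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://127.0.0.1:4010")  # placeholder URL

tool = client.tools.get("get_weather")  # hypothetical tool name
print(tool.description)
print(tool.input_schema)   # JSON Schema dict, e.g. {"type": "object", ...}
print(tool.output_schema)  # may be None if the tool declares no output schema

for t in client.tools.list():
    print(t.name, t.toolgroup_id)
```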
From 406c36699f5618b0d2673ab38c93516aa403778f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 18:18:50 +0000
Subject: [PATCH 5/9] feat(api): fixes to URLs
---
.stats.yml | 4 +-
api.md | 38 +-
src/llama_stack_client/_client.py | 76 ++
src/llama_stack_client/resources/__init__.py | 28 +
.../resources/benchmarks.py | 359 ++++++++
src/llama_stack_client/resources/datasets.py | 676 ++++++++++++++
src/llama_stack_client/resources/telemetry.py | 740 +++++++++++++++-
src/llama_stack_client/types/__init__.py | 30 +-
src/llama_stack_client/types/benchmark.py | 28 +
.../types/benchmark_list_response.py | 10 +
.../types/benchmark_register_params.py | 30 +
.../types/dataset_appendrows_params.py | 13 +
.../types/dataset_iterrows_params.py | 15 +
.../types/dataset_iterrows_response.py | 18 +
.../types/dataset_list_response.py | 66 ++
.../types/dataset_register_params.py | 69 ++
.../types/dataset_register_response.py | 54 ++
.../types/dataset_retrieve_response.py | 54 ++
src/llama_stack_client/types/event_param.py | 115 ---
.../types/list_benchmarks_response.py | 10 +
.../types/list_datasets_response.py | 11 +
.../types/query_condition_param.py | 19 +
.../types/query_spans_response.py | 11 +
.../types/span_with_status.py | 35 +
.../types/telemetry_get_span_response.py | 31 +
.../types/telemetry_get_span_tree_params.py | 17 +
.../types/telemetry_get_span_tree_response.py | 10 +
.../types/telemetry_log_event_params.py | 17 -
.../types/telemetry_query_metrics_params.py | 36 +
.../types/telemetry_query_metrics_response.py | 45 +
.../types/telemetry_query_spans_params.py | 22 +
.../types/telemetry_query_spans_response.py | 35 +
.../types/telemetry_query_traces_params.py | 25 +
.../types/telemetry_query_traces_response.py | 10 +
.../telemetry_save_spans_to_dataset_params.py | 25 +
src/llama_stack_client/types/trace.py | 22 +
tests/api_resources/test_benchmarks.py | 248 ++++++
tests/api_resources/test_datasets.py | 521 +++++++++++
tests/api_resources/test_telemetry.py | 826 ++++++++++++++++--
39 files changed, 4138 insertions(+), 261 deletions(-)
create mode 100644 src/llama_stack_client/resources/benchmarks.py
create mode 100644 src/llama_stack_client/resources/datasets.py
create mode 100644 src/llama_stack_client/types/benchmark.py
create mode 100644 src/llama_stack_client/types/benchmark_list_response.py
create mode 100644 src/llama_stack_client/types/benchmark_register_params.py
create mode 100644 src/llama_stack_client/types/dataset_appendrows_params.py
create mode 100644 src/llama_stack_client/types/dataset_iterrows_params.py
create mode 100644 src/llama_stack_client/types/dataset_iterrows_response.py
create mode 100644 src/llama_stack_client/types/dataset_list_response.py
create mode 100644 src/llama_stack_client/types/dataset_register_params.py
create mode 100644 src/llama_stack_client/types/dataset_register_response.py
create mode 100644 src/llama_stack_client/types/dataset_retrieve_response.py
delete mode 100644 src/llama_stack_client/types/event_param.py
create mode 100644 src/llama_stack_client/types/list_benchmarks_response.py
create mode 100644 src/llama_stack_client/types/list_datasets_response.py
create mode 100644 src/llama_stack_client/types/query_condition_param.py
create mode 100644 src/llama_stack_client/types/query_spans_response.py
create mode 100644 src/llama_stack_client/types/span_with_status.py
create mode 100644 src/llama_stack_client/types/telemetry_get_span_response.py
create mode 100644 src/llama_stack_client/types/telemetry_get_span_tree_params.py
create mode 100644 src/llama_stack_client/types/telemetry_get_span_tree_response.py
delete mode 100644 src/llama_stack_client/types/telemetry_log_event_params.py
create mode 100644 src/llama_stack_client/types/telemetry_query_metrics_params.py
create mode 100644 src/llama_stack_client/types/telemetry_query_metrics_response.py
create mode 100644 src/llama_stack_client/types/telemetry_query_spans_params.py
create mode 100644 src/llama_stack_client/types/telemetry_query_spans_response.py
create mode 100644 src/llama_stack_client/types/telemetry_query_traces_params.py
create mode 100644 src/llama_stack_client/types/telemetry_query_traces_response.py
create mode 100644 src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
create mode 100644 src/llama_stack_client/types/trace.py
create mode 100644 tests/api_resources/test_benchmarks.py
create mode 100644 tests/api_resources/test_datasets.py
diff --git a/.stats.yml b/.stats.yml
index 724604de..a88d26a5 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 93
+configured_endpoints: 108
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml
openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c
-config_hash: 0412cd40c0609550c1a47c69dd104e4f
+config_hash: 8800bdff1a087b9d5211dda2a7b9f66f
diff --git a/api.md b/api.md
index 0403c604..777cc6ff 100644
--- a/api.md
+++ b/api.md
@@ -108,9 +108,24 @@ Methods:
Types:
```python
-from llama_stack_client.types import ListDatasetsResponse
+from llama_stack_client.types import (
+ ListDatasetsResponse,
+ DatasetRetrieveResponse,
+ DatasetListResponse,
+ DatasetIterrowsResponse,
+ DatasetRegisterResponse,
+)
```
+Methods:
+
+- client.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
+- client.datasets.list() -> DatasetListResponse
+- client.datasets.appendrows(dataset_id, \*\*params) -> None
+- client.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
+- client.datasets.register(\*\*params) -> DatasetRegisterResponse
+- client.datasets.unregister(dataset_id) -> None
+
# Inspect
Types:
@@ -382,12 +397,23 @@ from llama_stack_client.types import (
QuerySpansResponse,
SpanWithStatus,
Trace,
+ TelemetryGetSpanResponse,
+ TelemetryGetSpanTreeResponse,
+ TelemetryQueryMetricsResponse,
+ TelemetryQuerySpansResponse,
+ TelemetryQueryTracesResponse,
)
```
Methods:
-- client.telemetry.log_event(\*\*params) -> None
+- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse
+- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
+- client.telemetry.get_trace(trace_id) -> Trace
+- client.telemetry.query_metrics(metric_name, \*\*params) -> TelemetryQueryMetricsResponse
+- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
+- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
+- client.telemetry.save_spans_to_dataset(\*\*params) -> None
# Scoring
@@ -426,9 +452,15 @@ Methods:
Types:
```python
-from llama_stack_client.types import Benchmark, ListBenchmarksResponse
+from llama_stack_client.types import Benchmark, ListBenchmarksResponse, BenchmarkListResponse
```
+Methods:
+
+- client.benchmarks.retrieve(benchmark_id) -> Benchmark
+- client.benchmarks.list() -> BenchmarkListResponse
+- client.benchmarks.register(\*\*params) -> None
+
# Files
Types:
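
The api.md entries above are generated summaries of the endpoints reworked in this patch. As an illustrative sketch of the new telemetry read path, assuming a locally running stack (the base URL is hypothetical, and the assumption that `query_traces` yields `Trace` objects carrying `trace_id`/`root_span_id` fields comes from the model types, not from this diff):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative base URL

# POST /v1alpha/telemetry/traces -- query recent traces (assumed to unwrap to a list of Trace).
traces = client.telemetry.query_traces(limit=5)

for trace in traces:
    # GET /v1alpha/telemetry/traces/{trace_id}
    full = client.telemetry.get_trace(trace.trace_id)
    # GET /v1alpha/telemetry/traces/{trace_id}/spans/{span_id} -- root span field assumed on the model.
    root = client.telemetry.get_span(full.root_span_id, trace_id=full.trace_id)
    print(trace.trace_id, root)
```
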
diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index 6eebb18f..6b8f11b2 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -43,10 +43,12 @@
inspect,
scoring,
shields,
+ datasets,
providers,
responses,
telemetry,
vector_io,
+ benchmarks,
embeddings,
toolgroups,
vector_dbs,
@@ -64,10 +66,12 @@
from .resources.inspect import InspectResource, AsyncInspectResource
from .resources.scoring import ScoringResource, AsyncScoringResource
from .resources.shields import ShieldsResource, AsyncShieldsResource
+ from .resources.datasets import DatasetsResource, AsyncDatasetsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.providers import ProvidersResource, AsyncProvidersResource
from .resources.telemetry import TelemetryResource, AsyncTelemetryResource
from .resources.vector_io import VectorIoResource, AsyncVectorIoResource
+ from .resources.benchmarks import BenchmarksResource, AsyncBenchmarksResource
from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource
from .resources.toolgroups import ToolgroupsResource, AsyncToolgroupsResource
from .resources.vector_dbs import VectorDBsResource, AsyncVectorDBsResource
@@ -177,6 +181,12 @@ def responses(self) -> ResponsesResource:
return ResponsesResource(self)
+ @cached_property
+ def datasets(self) -> DatasetsResource:
+ from .resources.datasets import DatasetsResource
+
+ return DatasetsResource(self)
+
@cached_property
def inspect(self) -> InspectResource:
from .resources.inspect import InspectResource
@@ -279,6 +289,12 @@ def scoring_functions(self) -> ScoringFunctionsResource:
return ScoringFunctionsResource(self)
+ @cached_property
+ def benchmarks(self) -> BenchmarksResource:
+ from .resources.benchmarks import BenchmarksResource
+
+ return BenchmarksResource(self)
+
@cached_property
def files(self) -> FilesResource:
from .resources.files import FilesResource
@@ -487,6 +503,12 @@ def responses(self) -> AsyncResponsesResource:
return AsyncResponsesResource(self)
+ @cached_property
+ def datasets(self) -> AsyncDatasetsResource:
+ from .resources.datasets import AsyncDatasetsResource
+
+ return AsyncDatasetsResource(self)
+
@cached_property
def inspect(self) -> AsyncInspectResource:
from .resources.inspect import AsyncInspectResource
@@ -589,6 +611,12 @@ def scoring_functions(self) -> AsyncScoringFunctionsResource:
return AsyncScoringFunctionsResource(self)
+ @cached_property
+ def benchmarks(self) -> AsyncBenchmarksResource:
+ from .resources.benchmarks import AsyncBenchmarksResource
+
+ return AsyncBenchmarksResource(self)
+
@cached_property
def files(self) -> AsyncFilesResource:
from .resources.files import AsyncFilesResource
@@ -746,6 +774,12 @@ def responses(self) -> responses.ResponsesResourceWithRawResponse:
return ResponsesResourceWithRawResponse(self._client.responses)
+ @cached_property
+ def datasets(self) -> datasets.DatasetsResourceWithRawResponse:
+ from .resources.datasets import DatasetsResourceWithRawResponse
+
+ return DatasetsResourceWithRawResponse(self._client.datasets)
+
@cached_property
def inspect(self) -> inspect.InspectResourceWithRawResponse:
from .resources.inspect import InspectResourceWithRawResponse
@@ -848,6 +882,12 @@ def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithRaw
return ScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
+ @cached_property
+ def benchmarks(self) -> benchmarks.BenchmarksResourceWithRawResponse:
+ from .resources.benchmarks import BenchmarksResourceWithRawResponse
+
+ return BenchmarksResourceWithRawResponse(self._client.benchmarks)
+
@cached_property
def files(self) -> files.FilesResourceWithRawResponse:
from .resources.files import FilesResourceWithRawResponse
@@ -891,6 +931,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
return AsyncResponsesResourceWithRawResponse(self._client.responses)
+ @cached_property
+ def datasets(self) -> datasets.AsyncDatasetsResourceWithRawResponse:
+ from .resources.datasets import AsyncDatasetsResourceWithRawResponse
+
+ return AsyncDatasetsResourceWithRawResponse(self._client.datasets)
+
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithRawResponse:
from .resources.inspect import AsyncInspectResourceWithRawResponse
@@ -995,6 +1041,12 @@ def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWi
return AsyncScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
+ @cached_property
+ def benchmarks(self) -> benchmarks.AsyncBenchmarksResourceWithRawResponse:
+ from .resources.benchmarks import AsyncBenchmarksResourceWithRawResponse
+
+ return AsyncBenchmarksResourceWithRawResponse(self._client.benchmarks)
+
@cached_property
def files(self) -> files.AsyncFilesResourceWithRawResponse:
from .resources.files import AsyncFilesResourceWithRawResponse
@@ -1038,6 +1090,12 @@ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
return ResponsesResourceWithStreamingResponse(self._client.responses)
+ @cached_property
+ def datasets(self) -> datasets.DatasetsResourceWithStreamingResponse:
+ from .resources.datasets import DatasetsResourceWithStreamingResponse
+
+ return DatasetsResourceWithStreamingResponse(self._client.datasets)
+
@cached_property
def inspect(self) -> inspect.InspectResourceWithStreamingResponse:
from .resources.inspect import InspectResourceWithStreamingResponse
@@ -1142,6 +1200,12 @@ def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithStr
return ScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
+ @cached_property
+ def benchmarks(self) -> benchmarks.BenchmarksResourceWithStreamingResponse:
+ from .resources.benchmarks import BenchmarksResourceWithStreamingResponse
+
+ return BenchmarksResourceWithStreamingResponse(self._client.benchmarks)
+
@cached_property
def files(self) -> files.FilesResourceWithStreamingResponse:
from .resources.files import FilesResourceWithStreamingResponse
@@ -1185,6 +1249,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
+ @cached_property
+ def datasets(self) -> datasets.AsyncDatasetsResourceWithStreamingResponse:
+ from .resources.datasets import AsyncDatasetsResourceWithStreamingResponse
+
+ return AsyncDatasetsResourceWithStreamingResponse(self._client.datasets)
+
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithStreamingResponse:
from .resources.inspect import AsyncInspectResourceWithStreamingResponse
@@ -1289,6 +1359,12 @@ def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWi
return AsyncScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
+ @cached_property
+ def benchmarks(self) -> benchmarks.AsyncBenchmarksResourceWithStreamingResponse:
+ from .resources.benchmarks import AsyncBenchmarksResourceWithStreamingResponse
+
+ return AsyncBenchmarksResourceWithStreamingResponse(self._client.benchmarks)
+
@cached_property
def files(self) -> files.AsyncFilesResourceWithStreamingResponse:
from .resources.files import AsyncFilesResourceWithStreamingResponse
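
Each accessor added above follows the same pattern: a `cached_property` whose body performs a lazy import, so the resource module is only loaded on first use and the instance is memoized afterwards. A quick sketch of the resulting behavior (client construction details are illustrative; no request is sent here):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative; nothing is fetched yet

# First attribute access runs the deferred import inside the cached_property;
# subsequent accesses return the same cached instance.
assert client.datasets is client.datasets
assert client.benchmarks is client.benchmarks
```
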
diff --git a/src/llama_stack_client/resources/__init__.py b/src/llama_stack_client/resources/__init__.py
index 27e4b3c0..3089ae21 100644
--- a/src/llama_stack_client/resources/__init__.py
+++ b/src/llama_stack_client/resources/__init__.py
@@ -80,6 +80,14 @@
ShieldsResourceWithStreamingResponse,
AsyncShieldsResourceWithStreamingResponse,
)
+from .datasets import (
+ DatasetsResource,
+ AsyncDatasetsResource,
+ DatasetsResourceWithRawResponse,
+ AsyncDatasetsResourceWithRawResponse,
+ DatasetsResourceWithStreamingResponse,
+ AsyncDatasetsResourceWithStreamingResponse,
+)
from .providers import (
ProvidersResource,
AsyncProvidersResource,
@@ -112,6 +120,14 @@
VectorIoResourceWithStreamingResponse,
AsyncVectorIoResourceWithStreamingResponse,
)
+from .benchmarks import (
+ BenchmarksResource,
+ AsyncBenchmarksResource,
+ BenchmarksResourceWithRawResponse,
+ AsyncBenchmarksResourceWithRawResponse,
+ BenchmarksResourceWithStreamingResponse,
+ AsyncBenchmarksResourceWithStreamingResponse,
+)
from .embeddings import (
EmbeddingsResource,
AsyncEmbeddingsResource,
@@ -210,6 +226,12 @@
"AsyncResponsesResourceWithRawResponse",
"ResponsesResourceWithStreamingResponse",
"AsyncResponsesResourceWithStreamingResponse",
+ "DatasetsResource",
+ "AsyncDatasetsResource",
+ "DatasetsResourceWithRawResponse",
+ "AsyncDatasetsResourceWithRawResponse",
+ "DatasetsResourceWithStreamingResponse",
+ "AsyncDatasetsResourceWithStreamingResponse",
"InspectResource",
"AsyncInspectResource",
"InspectResourceWithRawResponse",
@@ -312,6 +334,12 @@
"AsyncScoringFunctionsResourceWithRawResponse",
"ScoringFunctionsResourceWithStreamingResponse",
"AsyncScoringFunctionsResourceWithStreamingResponse",
+ "BenchmarksResource",
+ "AsyncBenchmarksResource",
+ "BenchmarksResourceWithRawResponse",
+ "AsyncBenchmarksResourceWithRawResponse",
+ "BenchmarksResourceWithStreamingResponse",
+ "AsyncBenchmarksResourceWithStreamingResponse",
"FilesResource",
"AsyncFilesResource",
"FilesResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/benchmarks.py b/src/llama_stack_client/resources/benchmarks.py
new file mode 100644
index 00000000..3d33bdcf
--- /dev/null
+++ b/src/llama_stack_client/resources/benchmarks.py
@@ -0,0 +1,359 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Type, Union, Iterable, cast
+
+import httpx
+
+from ..types import benchmark_register_params
+from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._wrappers import DataWrapper
+from .._base_client import make_request_options
+from ..types.benchmark import Benchmark
+from ..types.benchmark_list_response import BenchmarkListResponse
+
+__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
+
+
+class BenchmarksResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> BenchmarksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ """
+ return BenchmarksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ """
+ return BenchmarksResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ benchmark_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Benchmark:
+ """
+ Get a benchmark by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not benchmark_id:
+ raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
+ return self._get(
+ f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Benchmark,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> BenchmarkListResponse:
+ """List all benchmarks."""
+ return self._get(
+ "/v1alpha/eval/benchmarks",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
+ )
+
+ def register(
+ self,
+ *,
+ benchmark_id: str,
+ dataset_id: str,
+ scoring_functions: SequenceNotStr[str],
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
+ provider_benchmark_id: str | Omit = omit,
+ provider_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Register a benchmark.
+
+ Args:
+ benchmark_id: The ID of the benchmark to register.
+
+ dataset_id: The ID of the dataset to use for the benchmark.
+
+ scoring_functions: The scoring functions to use for the benchmark.
+
+ metadata: The metadata to use for the benchmark.
+
+ provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
+
+ provider_id: The ID of the provider to use for the benchmark.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ "/v1alpha/eval/benchmarks",
+ body=maybe_transform(
+ {
+ "benchmark_id": benchmark_id,
+ "dataset_id": dataset_id,
+ "scoring_functions": scoring_functions,
+ "metadata": metadata,
+ "provider_benchmark_id": provider_benchmark_id,
+ "provider_id": provider_id,
+ },
+ benchmark_register_params.BenchmarkRegisterParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncBenchmarksResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncBenchmarksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ """
+ return AsyncBenchmarksResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ benchmark_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Benchmark:
+ """
+ Get a benchmark by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not benchmark_id:
+ raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
+ return await self._get(
+ f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Benchmark,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> BenchmarkListResponse:
+ """List all benchmarks."""
+ return await self._get(
+ "/v1alpha/eval/benchmarks",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
+ )
+
+ async def register(
+ self,
+ *,
+ benchmark_id: str,
+ dataset_id: str,
+ scoring_functions: SequenceNotStr[str],
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
+ provider_benchmark_id: str | Omit = omit,
+ provider_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Register a benchmark.
+
+ Args:
+ benchmark_id: The ID of the benchmark to register.
+
+ dataset_id: The ID of the dataset to use for the benchmark.
+
+ scoring_functions: The scoring functions to use for the benchmark.
+
+ metadata: The metadata to use for the benchmark.
+
+ provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
+
+ provider_id: The ID of the provider to use for the benchmark.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ "/v1alpha/eval/benchmarks",
+ body=await async_maybe_transform(
+ {
+ "benchmark_id": benchmark_id,
+ "dataset_id": dataset_id,
+ "scoring_functions": scoring_functions,
+ "metadata": metadata,
+ "provider_benchmark_id": provider_benchmark_id,
+ "provider_id": provider_id,
+ },
+ benchmark_register_params.BenchmarkRegisterParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class BenchmarksResourceWithRawResponse:
+ def __init__(self, benchmarks: BenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.retrieve = to_raw_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ benchmarks.list,
+ )
+ self.register = to_raw_response_wrapper(
+ benchmarks.register,
+ )
+
+
+class AsyncBenchmarksResourceWithRawResponse:
+ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.retrieve = async_to_raw_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ benchmarks.list,
+ )
+ self.register = async_to_raw_response_wrapper(
+ benchmarks.register,
+ )
+
+
+class BenchmarksResourceWithStreamingResponse:
+ def __init__(self, benchmarks: BenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.retrieve = to_streamed_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ benchmarks.list,
+ )
+ self.register = to_streamed_response_wrapper(
+ benchmarks.register,
+ )
+
+
+class AsyncBenchmarksResourceWithStreamingResponse:
+ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ benchmarks.list,
+ )
+ self.register = async_to_streamed_response_wrapper(
+ benchmarks.register,
+ )
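
A usage sketch for the resource defined above (all IDs and the scoring-function name are hypothetical): `register` returns `None`, `list` is unwrapped from the server's `{"data": [...]}` envelope by `DataWrapper`, and the `with_raw_response` wrapper exposes the HTTP layer before `parse()` recovers the typed model.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative

# POST /v1alpha/eval/benchmarks -- returns None on success.
client.benchmarks.register(
    benchmark_id="my-benchmark",           # hypothetical ID
    dataset_id="my-qa-dataset",            # hypothetical ID
    scoring_functions=["my::scoring-fn"],  # hypothetical scoring function
)

# GET /v1alpha/eval/benchmarks -- DataWrapper strips the {"data": ...} envelope.
for benchmark in client.benchmarks.list():
    print(benchmark)

# GET /v1alpha/eval/benchmarks/{benchmark_id}
bench = client.benchmarks.retrieve("my-benchmark")

# Raw variant from BenchmarksResourceWithRawResponse: inspect headers, then parse.
raw = client.benchmarks.with_raw_response.retrieve("my-benchmark")
print(raw.headers)
bench_again = raw.parse()
```
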
diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py
new file mode 100644
index 00000000..5824287c
--- /dev/null
+++ b/src/llama_stack_client/resources/datasets.py
@@ -0,0 +1,676 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Type, Union, Iterable, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import dataset_iterrows_params, dataset_register_params, dataset_appendrows_params
+from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._wrappers import DataWrapper
+from .._base_client import make_request_options
+from ..types.dataset_list_response import DatasetListResponse
+from ..types.dataset_iterrows_response import DatasetIterrowsResponse
+from ..types.dataset_register_response import DatasetRegisterResponse
+from ..types.dataset_retrieve_response import DatasetRetrieveResponse
+
+__all__ = ["DatasetsResource", "AsyncDatasetsResource"]
+
+
+class DatasetsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> DatasetsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ """
+ return DatasetsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ """
+ return DatasetsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ dataset_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetRetrieveResponse:
+ """
+ Get a dataset by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ return self._get(
+ f"/v1beta/datasets/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DatasetRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetListResponse:
+ """List all datasets."""
+ return self._get(
+ "/v1beta/datasets",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[DatasetListResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
+ )
+
+ def appendrows(
+ self,
+ dataset_id: str,
+ *,
+ rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Append rows to a dataset.
+
+ Args:
+ rows: The rows to append to the dataset.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
+ body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ def iterrows(
+ self,
+ dataset_id: str,
+ *,
+ limit: int | Omit = omit,
+ start_index: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetIterrowsResponse:
+ """Get a paginated list of rows from a dataset.
+
+ Uses offset-based pagination where:
+
+ - start_index: The starting index (0-based). If None, starts from beginning.
+ - limit: Number of items to return. If None or -1, returns all items.
+
+ The response includes:
+
+ - data: List of items for the current page.
+ - has_more: Whether there are more items available after this set.
+
+ Args:
+ limit: The number of rows to get.
+
+ start_index: Index into dataset for the first row to get. Get all rows if None.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ return self._get(
+ f"/v1beta/datasetio/iterrows/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "start_index": start_index,
+ },
+ dataset_iterrows_params.DatasetIterrowsParams,
+ ),
+ ),
+ cast_to=DatasetIterrowsResponse,
+ )
+
+ def register(
+ self,
+ *,
+ purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
+ source: dataset_register_params.Source,
+ dataset_id: str | Omit = omit,
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetRegisterResponse:
+ """Register a new dataset.
+
+ Args:
+ purpose: The purpose of the dataset.
+
+            One of:
+
+            - "post-training/messages": The dataset contains a messages column with a
+              list of messages for post-training, e.g. { "messages": [ {"role": "user",
+              "content": "Hello, world!"}, {"role": "assistant", "content": "Hello,
+              world!"} ] }
+            - "eval/question-answer": The dataset contains a question column and an
+              answer column for evaluation, e.g. { "question": "What is the capital of
+              France?", "answer": "Paris" }
+            - "eval/messages-answer": The dataset contains a messages column with a
+              list of messages and an answer column for evaluation, e.g. { "messages":
+              [ {"role": "user", "content": "Hello, my name is John Doe."}, {"role":
+              "assistant", "content": "Hello, John Doe. How can I help you today?"},
+              {"role": "user", "content": "What's my name?"} ], "answer": "John Doe" }
+
+          source: The data source of the dataset. Ensure that the data source schema
+            is compatible with the purpose of the dataset. Examples:
+
+            - { "type": "uri", "uri": "https://mywebsite.com/mydata.jsonl" }
+            - { "type": "uri", "uri": "lsfs://mydata.jsonl" }
+            - { "type": "uri", "uri": "data:csv;base64,{base64_content}" }
+            - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" }
+            - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content":
+              "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"} ] } ] }
+
+ dataset_id: The ID of the dataset. If not provided, an ID will be generated.
+
+ metadata: The metadata for the dataset. - E.g. {"description": "My dataset"}.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v1beta/datasets",
+ body=maybe_transform(
+ {
+ "purpose": purpose,
+ "source": source,
+ "dataset_id": dataset_id,
+ "metadata": metadata,
+ },
+ dataset_register_params.DatasetRegisterParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DatasetRegisterResponse,
+ )
+
+ def unregister(
+ self,
+ dataset_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Unregister a dataset by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v1beta/datasets/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncDatasetsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncDatasetsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncDatasetsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ """
+ return AsyncDatasetsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ dataset_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetRetrieveResponse:
+ """
+ Get a dataset by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ return await self._get(
+ f"/v1beta/datasets/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DatasetRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetListResponse:
+ """List all datasets."""
+ return await self._get(
+ "/v1beta/datasets",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[DatasetListResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
+ )
+
+ async def appendrows(
+ self,
+ dataset_id: str,
+ *,
+ rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Append rows to a dataset.
+
+ Args:
+ rows: The rows to append to the dataset.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
+ body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+ async def iterrows(
+ self,
+ dataset_id: str,
+ *,
+ limit: int | Omit = omit,
+ start_index: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetIterrowsResponse:
+ """Get a paginated list of rows from a dataset.
+
+ Uses offset-based pagination where:
+
+ - start_index: The starting index (0-based). If None, starts from beginning.
+ - limit: Number of items to return. If None or -1, returns all items.
+
+ The response includes:
+
+ - data: List of items for the current page.
+ - has_more: Whether there are more items available after this set.
+
+ Args:
+ limit: The number of rows to get.
+
+ start_index: Index into dataset for the first row to get. Get all rows if None.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ return await self._get(
+ f"/v1beta/datasetio/iterrows/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "limit": limit,
+ "start_index": start_index,
+ },
+ dataset_iterrows_params.DatasetIterrowsParams,
+ ),
+ ),
+ cast_to=DatasetIterrowsResponse,
+ )
+
+ async def register(
+ self,
+ *,
+ purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
+ source: dataset_register_params.Source,
+ dataset_id: str | Omit = omit,
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DatasetRegisterResponse:
+ """Register a new dataset.
+
+ Args:
+ purpose: The purpose of the dataset.
+
+            One of:
+
+            - "post-training/messages": The dataset contains a messages column with a
+              list of messages for post-training, e.g. { "messages": [ {"role": "user",
+              "content": "Hello, world!"}, {"role": "assistant", "content": "Hello,
+              world!"} ] }
+            - "eval/question-answer": The dataset contains a question column and an
+              answer column for evaluation, e.g. { "question": "What is the capital of
+              France?", "answer": "Paris" }
+            - "eval/messages-answer": The dataset contains a messages column with a
+              list of messages and an answer column for evaluation, e.g. { "messages":
+              [ {"role": "user", "content": "Hello, my name is John Doe."}, {"role":
+              "assistant", "content": "Hello, John Doe. How can I help you today?"},
+              {"role": "user", "content": "What's my name?"} ], "answer": "John Doe" }
+
+          source: The data source of the dataset. Ensure that the data source schema
+            is compatible with the purpose of the dataset. Examples:
+
+            - { "type": "uri", "uri": "https://mywebsite.com/mydata.jsonl" }
+            - { "type": "uri", "uri": "lsfs://mydata.jsonl" }
+            - { "type": "uri", "uri": "data:csv;base64,{base64_content}" }
+            - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" }
+            - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content":
+              "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"} ] } ] }
+
+ dataset_id: The ID of the dataset. If not provided, an ID will be generated.
+
+ metadata: The metadata for the dataset. - E.g. {"description": "My dataset"}.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v1beta/datasets",
+ body=await async_maybe_transform(
+ {
+ "purpose": purpose,
+ "source": source,
+ "dataset_id": dataset_id,
+ "metadata": metadata,
+ },
+ dataset_register_params.DatasetRegisterParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DatasetRegisterResponse,
+ )
+
+ async def unregister(
+ self,
+ dataset_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ Unregister a dataset by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v1beta/datasets/{dataset_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class DatasetsResourceWithRawResponse:
+ def __init__(self, datasets: DatasetsResource) -> None:
+ self._datasets = datasets
+
+ self.retrieve = to_raw_response_wrapper(
+ datasets.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ datasets.list,
+ )
+ self.appendrows = to_raw_response_wrapper(
+ datasets.appendrows,
+ )
+ self.iterrows = to_raw_response_wrapper(
+ datasets.iterrows,
+ )
+ self.register = to_raw_response_wrapper(
+ datasets.register,
+ )
+ self.unregister = to_raw_response_wrapper(
+ datasets.unregister,
+ )
+
+
+class AsyncDatasetsResourceWithRawResponse:
+ def __init__(self, datasets: AsyncDatasetsResource) -> None:
+ self._datasets = datasets
+
+ self.retrieve = async_to_raw_response_wrapper(
+ datasets.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ datasets.list,
+ )
+ self.appendrows = async_to_raw_response_wrapper(
+ datasets.appendrows,
+ )
+ self.iterrows = async_to_raw_response_wrapper(
+ datasets.iterrows,
+ )
+ self.register = async_to_raw_response_wrapper(
+ datasets.register,
+ )
+ self.unregister = async_to_raw_response_wrapper(
+ datasets.unregister,
+ )
+
+
+class DatasetsResourceWithStreamingResponse:
+ def __init__(self, datasets: DatasetsResource) -> None:
+ self._datasets = datasets
+
+ self.retrieve = to_streamed_response_wrapper(
+ datasets.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ datasets.list,
+ )
+ self.appendrows = to_streamed_response_wrapper(
+ datasets.appendrows,
+ )
+ self.iterrows = to_streamed_response_wrapper(
+ datasets.iterrows,
+ )
+ self.register = to_streamed_response_wrapper(
+ datasets.register,
+ )
+ self.unregister = to_streamed_response_wrapper(
+ datasets.unregister,
+ )
+
+
+class AsyncDatasetsResourceWithStreamingResponse:
+ def __init__(self, datasets: AsyncDatasetsResource) -> None:
+ self._datasets = datasets
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ datasets.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ datasets.list,
+ )
+ self.appendrows = async_to_streamed_response_wrapper(
+ datasets.appendrows,
+ )
+ self.iterrows = async_to_streamed_response_wrapper(
+ datasets.iterrows,
+ )
+ self.register = async_to_streamed_response_wrapper(
+ datasets.register,
+ )
+ self.unregister = async_to_streamed_response_wrapper(
+ datasets.unregister,
+ )
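
And a hedged end-to-end sketch of the dataset lifecycle defined above (the inline rows and IDs are illustrative; the pagination bookkeeping assumes the `data`/`has_more` fields described in the iterrows docstring):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # illustrative

# POST /v1beta/datasets -- register with an inline "rows" source.
client.datasets.register(
    purpose="eval/question-answer",
    source={
        "type": "rows",
        "rows": [{"question": "What is the capital of France?", "answer": "Paris"}],
    },
    dataset_id="my-qa-dataset",  # optional; the server generates one if omitted
)

# POST /v1beta/datasetio/append-rows/{dataset_id} -- returns None.
client.datasets.appendrows(
    "my-qa-dataset",
    rows=[{"question": "2 + 2 = ?", "answer": "4"}],
)

# GET /v1beta/datasetio/iterrows/{dataset_id} -- offset-based pagination loop.
start = 0
while True:
    page = client.datasets.iterrows("my-qa-dataset", start_index=start, limit=100)
    for row in page.data:
        print(row)
    if not page.has_more:
        break
    start += len(page.data)

# DELETE /v1beta/datasets/{dataset_id}
client.datasets.unregister("my-qa-dataset")
```
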
diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py
index 7210c834..972b8fdf 100644
--- a/src/llama_stack_client/resources/telemetry.py
+++ b/src/llama_stack_client/resources/telemetry.py
@@ -2,10 +2,19 @@
from __future__ import annotations
+from typing import Type, Iterable, cast
+from typing_extensions import Literal
+
import httpx
-from ..types import telemetry_log_event_params
-from .._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from ..types import (
+ telemetry_query_spans_params,
+ telemetry_query_traces_params,
+ telemetry_get_span_tree_params,
+ telemetry_query_metrics_params,
+ telemetry_save_spans_to_dataset_params,
+)
+from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -15,8 +24,15 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from .._wrappers import DataWrapper
+from ..types.trace import Trace
from .._base_client import make_request_options
-from ..types.event_param import EventParam
+from ..types.query_condition_param import QueryConditionParam
+from ..types.telemetry_get_span_response import TelemetryGetSpanResponse
+from ..types.telemetry_query_spans_response import TelemetryQuerySpansResponse
+from ..types.telemetry_query_traces_response import TelemetryQueryTracesResponse
+from ..types.telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse
+from ..types.telemetry_query_metrics_response import TelemetryQueryMetricsResponse
__all__ = ["TelemetryResource", "AsyncTelemetryResource"]
@@ -41,11 +57,300 @@ def with_streaming_response(self) -> TelemetryResourceWithStreamingResponse:
"""
return TelemetryResourceWithStreamingResponse(self)
- def log_event(
+ def get_span(
+ self,
+ span_id: str,
+ *,
+ trace_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryGetSpanResponse:
+ """
+ Get a span by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not trace_id:
+ raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
+ if not span_id:
+ raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
+ return self._get(
+ f"/v1alpha/telemetry/traces/{trace_id}/spans/{span_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=TelemetryGetSpanResponse,
+ )
+
+ def get_span_tree(
self,
+ span_id: str,
*,
- event: EventParam,
- ttl_seconds: int,
+ attributes_to_return: SequenceNotStr[str] | Omit = omit,
+ max_depth: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryGetSpanTreeResponse:
+ """
+        Get the tree of spans rooted at the given span ID.
+
+ Args:
+ attributes_to_return: The attributes to return in the tree.
+
+ max_depth: The maximum depth of the tree.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not span_id:
+ raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
+ return self._post(
+ f"/v1alpha/telemetry/spans/{span_id}/tree",
+ body=maybe_transform(
+ {
+ "attributes_to_return": attributes_to_return,
+ "max_depth": max_depth,
+ },
+ telemetry_get_span_tree_params.TelemetryGetSpanTreeParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]),
+ )
+
+ def get_trace(
+ self,
+ trace_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Trace:
+ """
+ Get a trace by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not trace_id:
+ raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
+ return self._get(
+ f"/v1alpha/telemetry/traces/{trace_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Trace,
+ )
+
+ def query_metrics(
+ self,
+ metric_name: str,
+ *,
+ query_type: Literal["range", "instant"],
+ start_time: int,
+ end_time: int | Omit = omit,
+ granularity: str | Omit = omit,
+ label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQueryMetricsResponse:
+ """
+ Query metrics.
+
+ Args:
+ query_type: The type of query to perform.
+
+ start_time: The start time of the metric to query.
+
+ end_time: The end time of the metric to query.
+
+ granularity: The granularity of the metric to query.
+
+ label_matchers: The label matchers to apply to the metric.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not metric_name:
+ raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
+ return self._post(
+ f"/v1alpha/telemetry/metrics/{metric_name}",
+ body=maybe_transform(
+ {
+ "query_type": query_type,
+ "start_time": start_time,
+ "end_time": end_time,
+ "granularity": granularity,
+ "label_matchers": label_matchers,
+ },
+ telemetry_query_metrics_params.TelemetryQueryMetricsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
+ )
+
+ def query_spans(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam],
+ attributes_to_return: SequenceNotStr[str],
+ max_depth: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQuerySpansResponse:
+ """
+ Query spans.
+
+ Args:
+ attribute_filters: The attribute filters to apply to the spans.
+
+ attributes_to_return: The attributes to return in the spans.
+
+ max_depth: The maximum depth of the tree.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v1alpha/telemetry/spans",
+ body=maybe_transform(
+ {
+ "attribute_filters": attribute_filters,
+ "attributes_to_return": attributes_to_return,
+ "max_depth": max_depth,
+ },
+ telemetry_query_spans_params.TelemetryQuerySpansParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]),
+ )
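Both body arguments of query_spans are required; a sketch with an illustrative attribute filter:

    spans = client.telemetry.query_spans(
        attribute_filters=[{"key": "session_id", "op": "eq", "value": "s-1"}],
        attributes_to_return=["model", "tokens"],
    )
    for span in spans:
        print(span.span_id, span.name, span.attributes)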
+
+ def query_traces(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order_by: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQueryTracesResponse:
+ """
+ Query traces.
+
+ Args:
+ attribute_filters: The attribute filters to apply to the traces.
+
+ limit: The maximum number of traces to return.
+
+ offset: The number of matching traces to skip before returning results.
+
+ order_by: The attributes to sort the returned traces by.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v1alpha/telemetry/traces",
+ body=maybe_transform(
+ {
+ "attribute_filters": attribute_filters,
+ "limit": limit,
+ "offset": offset,
+ "order_by": order_by,
+ },
+ telemetry_query_traces_params.TelemetryQueryTracesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]),
+ )
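All query_traces arguments are optional; a sketch (the "-" descending prefix in order_by is an assumption, check the server docs):

    traces = client.telemetry.query_traces(
        attribute_filters=[{"key": "user_id", "op": "eq", "value": "u-1"}],  # illustrative
        limit=20,
        order_by=["-start_time"],
    )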
+
+ def save_spans_to_dataset(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam],
+ attributes_to_save: SequenceNotStr[str],
+ dataset_id: str,
+ max_depth: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -54,12 +359,16 @@ def log_event(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
- Log an event.
+ Save spans to a dataset.
Args:
- event: The event to log.
+ attribute_filters: The attribute filters to apply to the spans.
+
+ attributes_to_save: The attributes to save to the dataset.
+
+ dataset_id: The ID of the dataset to save the spans to.
- ttl_seconds: The time to live of the event.
+ max_depth: The maximum depth of the tree.
extra_headers: Send extra headers
@@ -71,13 +380,15 @@ def log_event(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- "/v1/telemetry/events",
+ "/v1alpha/telemetry/spans/export",
body=maybe_transform(
{
- "event": event,
- "ttl_seconds": ttl_seconds,
+ "attribute_filters": attribute_filters,
+ "attributes_to_save": attributes_to_save,
+ "dataset_id": dataset_id,
+ "max_depth": max_depth,
},
- telemetry_log_event_params.TelemetryLogEventParams,
+ telemetry_save_spans_to_dataset_params.TelemetrySaveSpansToDatasetParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
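A sketch of exporting matching spans into an already-registered dataset (attribute names and IDs illustrative):

    client.telemetry.save_spans_to_dataset(
        attribute_filters=[{"key": "status", "op": "eq", "value": "ok"}],
        attributes_to_save=["model", "prompt", "completion"],
        dataset_id="my-eval-dataset",
    )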
@@ -106,11 +417,300 @@ def with_streaming_response(self) -> AsyncTelemetryResourceWithStreamingResponse
"""
return AsyncTelemetryResourceWithStreamingResponse(self)
- async def log_event(
+ async def get_span(
+ self,
+ span_id: str,
+ *,
+ trace_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryGetSpanResponse:
+ """
+ Get a span by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not trace_id:
+ raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
+ if not span_id:
+ raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
+ return await self._get(
+ f"/v1alpha/telemetry/traces/{trace_id}/spans/{span_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=TelemetryGetSpanResponse,
+ )
+
+ async def get_span_tree(
+ self,
+ span_id: str,
+ *,
+ attributes_to_return: SequenceNotStr[str] | Omit = omit,
+ max_depth: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryGetSpanTreeResponse:
+ """
+ Get a span tree by its ID.
+
+ Args:
+ attributes_to_return: The attributes to return in the tree.
+
+ max_depth: The maximum depth of the tree.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not span_id:
+ raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}")
+ return await self._post(
+ f"/v1alpha/telemetry/spans/{span_id}/tree",
+ body=await async_maybe_transform(
+ {
+ "attributes_to_return": attributes_to_return,
+ "max_depth": max_depth,
+ },
+ telemetry_get_span_tree_params.TelemetryGetSpanTreeParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]),
+ )
+
+ async def get_trace(
self,
+ trace_id: str,
*,
- event: EventParam,
- ttl_seconds: int,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Trace:
+ """
+ Get a trace by its ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not trace_id:
+ raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}")
+ return await self._get(
+ f"/v1alpha/telemetry/traces/{trace_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Trace,
+ )
+
+ async def query_metrics(
+ self,
+ metric_name: str,
+ *,
+ query_type: Literal["range", "instant"],
+ start_time: int,
+ end_time: int | Omit = omit,
+ granularity: str | Omit = omit,
+ label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQueryMetricsResponse:
+ """
+ Query metrics.
+
+ Args:
+ query_type: The type of query to perform.
+
+ start_time: The start time of the metric to query.
+
+ end_time: The end time of the metric to query.
+
+ granularity: The granularity of the metric to query.
+
+ label_matchers: The label matchers to apply to the metric.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not metric_name:
+ raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
+ return await self._post(
+ f"/v1alpha/telemetry/metrics/{metric_name}",
+ body=await async_maybe_transform(
+ {
+ "query_type": query_type,
+ "start_time": start_time,
+ "end_time": end_time,
+ "granularity": granularity,
+ "label_matchers": label_matchers,
+ },
+ telemetry_query_metrics_params.TelemetryQueryMetricsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
+ )
+
+ async def query_spans(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam],
+ attributes_to_return: SequenceNotStr[str],
+ max_depth: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQuerySpansResponse:
+ """
+ Query spans.
+
+ Args:
+ attribute_filters: The attribute filters to apply to the spans.
+
+ attributes_to_return: The attributes to return in the spans.
+
+ max_depth: The maximum depth of the tree.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v1alpha/telemetry/spans",
+ body=await async_maybe_transform(
+ {
+ "attribute_filters": attribute_filters,
+ "attributes_to_return": attributes_to_return,
+ "max_depth": max_depth,
+ },
+ telemetry_query_spans_params.TelemetryQuerySpansParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]),
+ )
+
+ async def query_traces(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order_by: SequenceNotStr[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> TelemetryQueryTracesResponse:
+ """
+ Query traces.
+
+ Args:
+ attribute_filters: The attribute filters to apply to the traces.
+
+ limit: The maximum number of traces to return.
+
+ offset: The number of matching traces to skip before returning results.
+
+ order_by: The attributes to sort the returned traces by.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v1alpha/telemetry/traces",
+ body=await async_maybe_transform(
+ {
+ "attribute_filters": attribute_filters,
+ "limit": limit,
+ "offset": offset,
+ "order_by": order_by,
+ },
+ telemetry_query_traces_params.TelemetryQueryTracesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]),
+ )
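The async surface mirrors the sync one method-for-method; a minimal sketch:

    import asyncio

    from llama_stack_client import AsyncLlamaStackClient

    async def main() -> None:
        client = AsyncLlamaStackClient(base_url="http://localhost:8321")  # illustrative URL
        traces = await client.telemetry.query_traces(limit=5)
        for trace in traces:
            print(trace)

    asyncio.run(main())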
+
+ async def save_spans_to_dataset(
+ self,
+ *,
+ attribute_filters: Iterable[QueryConditionParam],
+ attributes_to_save: SequenceNotStr[str],
+ dataset_id: str,
+ max_depth: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -119,12 +719,16 @@ async def log_event(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
- Log an event.
+ Save spans to a dataset.
Args:
- event: The event to log.
+ attribute_filters: The attribute filters to apply to the spans.
- ttl_seconds: The time to live of the event.
+ attributes_to_save: The attributes to save to the dataset.
+
+ dataset_id: The ID of the dataset to save the spans to.
+
+ max_depth: The maximum depth of the tree.
extra_headers: Send extra headers
@@ -136,13 +740,15 @@ async def log_event(
"""
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- "/v1/telemetry/events",
+ "/v1alpha/telemetry/spans/export",
body=await async_maybe_transform(
{
- "event": event,
- "ttl_seconds": ttl_seconds,
+ "attribute_filters": attribute_filters,
+ "attributes_to_save": attributes_to_save,
+ "dataset_id": dataset_id,
+ "max_depth": max_depth,
},
- telemetry_log_event_params.TelemetryLogEventParams,
+ telemetry_save_spans_to_dataset_params.TelemetrySaveSpansToDatasetParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -155,8 +761,26 @@ class TelemetryResourceWithRawResponse:
def __init__(self, telemetry: TelemetryResource) -> None:
self._telemetry = telemetry
- self.log_event = to_raw_response_wrapper(
- telemetry.log_event,
+ self.get_span = to_raw_response_wrapper(
+ telemetry.get_span,
+ )
+ self.get_span_tree = to_raw_response_wrapper(
+ telemetry.get_span_tree,
+ )
+ self.get_trace = to_raw_response_wrapper(
+ telemetry.get_trace,
+ )
+ self.query_metrics = to_raw_response_wrapper(
+ telemetry.query_metrics,
+ )
+ self.query_spans = to_raw_response_wrapper(
+ telemetry.query_spans,
+ )
+ self.query_traces = to_raw_response_wrapper(
+ telemetry.query_traces,
+ )
+ self.save_spans_to_dataset = to_raw_response_wrapper(
+ telemetry.save_spans_to_dataset,
)
@@ -164,8 +788,26 @@ class AsyncTelemetryResourceWithRawResponse:
def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self._telemetry = telemetry
- self.log_event = async_to_raw_response_wrapper(
- telemetry.log_event,
+ self.get_span = async_to_raw_response_wrapper(
+ telemetry.get_span,
+ )
+ self.get_span_tree = async_to_raw_response_wrapper(
+ telemetry.get_span_tree,
+ )
+ self.get_trace = async_to_raw_response_wrapper(
+ telemetry.get_trace,
+ )
+ self.query_metrics = async_to_raw_response_wrapper(
+ telemetry.query_metrics,
+ )
+ self.query_spans = async_to_raw_response_wrapper(
+ telemetry.query_spans,
+ )
+ self.query_traces = async_to_raw_response_wrapper(
+ telemetry.query_traces,
+ )
+ self.save_spans_to_dataset = async_to_raw_response_wrapper(
+ telemetry.save_spans_to_dataset,
)
@@ -173,8 +815,26 @@ class TelemetryResourceWithStreamingResponse:
def __init__(self, telemetry: TelemetryResource) -> None:
self._telemetry = telemetry
- self.log_event = to_streamed_response_wrapper(
- telemetry.log_event,
+ self.get_span = to_streamed_response_wrapper(
+ telemetry.get_span,
+ )
+ self.get_span_tree = to_streamed_response_wrapper(
+ telemetry.get_span_tree,
+ )
+ self.get_trace = to_streamed_response_wrapper(
+ telemetry.get_trace,
+ )
+ self.query_metrics = to_streamed_response_wrapper(
+ telemetry.query_metrics,
+ )
+ self.query_spans = to_streamed_response_wrapper(
+ telemetry.query_spans,
+ )
+ self.query_traces = to_streamed_response_wrapper(
+ telemetry.query_traces,
+ )
+ self.save_spans_to_dataset = to_streamed_response_wrapper(
+ telemetry.save_spans_to_dataset,
)
@@ -182,6 +842,24 @@ class AsyncTelemetryResourceWithStreamingResponse:
def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self._telemetry = telemetry
- self.log_event = async_to_streamed_response_wrapper(
- telemetry.log_event,
+ self.get_span = async_to_streamed_response_wrapper(
+ telemetry.get_span,
+ )
+ self.get_span_tree = async_to_streamed_response_wrapper(
+ telemetry.get_span_tree,
+ )
+ self.get_trace = async_to_streamed_response_wrapper(
+ telemetry.get_trace,
+ )
+ self.query_metrics = async_to_streamed_response_wrapper(
+ telemetry.query_metrics,
+ )
+ self.query_spans = async_to_streamed_response_wrapper(
+ telemetry.query_spans,
+ )
+ self.query_traces = async_to_streamed_response_wrapper(
+ telemetry.query_traces,
+ )
+ self.save_spans_to_dataset = async_to_streamed_response_wrapper(
+ telemetry.save_spans_to_dataset,
)
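These wrapper classes back the standard `.with_raw_response` and `.with_streaming_response` accessors; a sketch of reading headers off a raw response (the header name is illustrative):

    response = client.telemetry.with_raw_response.get_trace(trace_id="tr-123")
    print(response.headers.get("x-request-id"))
    trace = response.parse()  # deserialize into the Trace model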
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 6cc3787f..eeed2336 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -4,6 +4,7 @@
from .file import File as File
from .model import Model as Model
+from .trace import Trace as Trace
from .shared import (
Message as Message,
Document as Document,
@@ -25,10 +26,10 @@
)
from .shield import Shield as Shield
from .tool_def import ToolDef as ToolDef
+from .benchmark import Benchmark as Benchmark
from .route_info import RouteInfo as RouteInfo
from .scoring_fn import ScoringFn as ScoringFn
from .tool_group import ToolGroup as ToolGroup
-from .event_param import EventParam as EventParam
from .health_info import HealthInfo as HealthInfo
from .vector_store import VectorStore as VectorStore
from .version_info import VersionInfo as VersionInfo
@@ -37,6 +38,7 @@
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
from .file_list_params import FileListParams as FileListParams
+from .span_with_status import SpanWithStatus as SpanWithStatus
from .tool_list_params import ToolListParams as ToolListParams
from .scoring_fn_params import ScoringFnParams as ScoringFnParams
from .file_create_params import FileCreateParams as FileCreateParams
@@ -48,13 +50,17 @@
from .delete_file_response import DeleteFileResponse as DeleteFileResponse
from .list_models_response import ListModelsResponse as ListModelsResponse
from .list_routes_response import ListRoutesResponse as ListRoutesResponse
+from .query_spans_response import QuerySpansResponse as QuerySpansResponse
from .response_list_params import ResponseListParams as ResponseListParams
from .scoring_score_params import ScoringScoreParams as ScoringScoreParams
from .shield_list_response import ShieldListResponse as ShieldListResponse
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .dataset_list_response import DatasetListResponse as DatasetListResponse
from .list_shields_response import ListShieldsResponse as ListShieldsResponse
from .model_register_params import ModelRegisterParams as ModelRegisterParams
from .query_chunks_response import QueryChunksResponse as QueryChunksResponse
+from .query_condition_param import QueryConditionParam as QueryConditionParam
+from .list_datasets_response import ListDatasetsResponse as ListDatasetsResponse
from .provider_list_response import ProviderListResponse as ProviderListResponse
from .response_create_params import ResponseCreateParams as ResponseCreateParams
from .response_list_response import ResponseListResponse as ResponseListResponse
@@ -63,6 +69,9 @@
from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams
from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult
from .vector_io_query_params import VectorIoQueryParams as VectorIoQueryParams
+from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse
+from .dataset_iterrows_params import DatasetIterrowsParams as DatasetIterrowsParams
+from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .list_providers_response import ListProvidersResponse as ListProvidersResponse
from .scoring_fn_params_param import ScoringFnParamsParam as ScoringFnParamsParam
@@ -70,35 +79,52 @@
from .vector_db_list_response import VectorDBListResponse as VectorDBListResponse
from .vector_io_insert_params import VectorIoInsertParams as VectorIoInsertParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .list_benchmarks_response import ListBenchmarksResponse as ListBenchmarksResponse
from .list_vector_dbs_response import ListVectorDBsResponse as ListVectorDBsResponse
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .response_create_response import ResponseCreateResponse as ResponseCreateResponse
from .response_delete_response import ResponseDeleteResponse as ResponseDeleteResponse
from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
+from .benchmark_register_params import BenchmarkRegisterParams as BenchmarkRegisterParams
+from .dataset_appendrows_params import DatasetAppendrowsParams as DatasetAppendrowsParams
+from .dataset_iterrows_response import DatasetIterrowsResponse as DatasetIterrowsResponse
+from .dataset_register_response import DatasetRegisterResponse as DatasetRegisterResponse
+from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse
from .list_tool_groups_response import ListToolGroupsResponse as ListToolGroupsResponse
from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams
from .vector_db_register_params import VectorDBRegisterParams as VectorDBRegisterParams
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
from .create_embeddings_response import CreateEmbeddingsResponse as CreateEmbeddingsResponse
from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams
-from .telemetry_log_event_params import TelemetryLogEventParams as TelemetryLogEventParams
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .list_vector_stores_response import ListVectorStoresResponse as ListVectorStoresResponse
+from .telemetry_get_span_response import TelemetryGetSpanResponse as TelemetryGetSpanResponse
from .vector_db_register_response import VectorDBRegisterResponse as VectorDBRegisterResponse
from .vector_db_retrieve_response import VectorDBRetrieveResponse as VectorDBRetrieveResponse
from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse
+from .telemetry_query_spans_params import TelemetryQuerySpansParams as TelemetryQuerySpansParams
from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
+from .telemetry_query_traces_params import TelemetryQueryTracesParams as TelemetryQueryTracesParams
from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse
+from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams
+from .telemetry_query_metrics_params import TelemetryQueryMetricsParams as TelemetryQueryMetricsParams
+from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse
from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams
from .list_scoring_functions_response import ListScoringFunctionsResponse as ListScoringFunctionsResponse
+from .telemetry_query_traces_response import TelemetryQueryTracesResponse as TelemetryQueryTracesResponse
from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams
from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
+from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse
+from .telemetry_query_metrics_response import TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse
from .tool_runtime_list_tools_response import ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse
from .synthetic_data_generation_response import SyntheticDataGenerationResponse as SyntheticDataGenerationResponse
+from .telemetry_save_spans_to_dataset_params import (
+ TelemetrySaveSpansToDatasetParams as TelemetrySaveSpansToDatasetParams,
+)
from .synthetic_data_generation_generate_params import (
SyntheticDataGenerationGenerateParams as SyntheticDataGenerationGenerateParams,
)
diff --git a/src/llama_stack_client/types/benchmark.py b/src/llama_stack_client/types/benchmark.py
new file mode 100644
index 00000000..eb6dde75
--- /dev/null
+++ b/src/llama_stack_client/types/benchmark.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Benchmark"]
+
+
+class Benchmark(BaseModel):
+ dataset_id: str
+ """Identifier of the dataset to use for the benchmark evaluation"""
+
+ identifier: str
+
+ metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """Metadata for this evaluation task"""
+
+ provider_id: str
+
+ scoring_functions: List[str]
+ """List of scoring function identifiers to apply during evaluation"""
+
+ type: Literal["benchmark"]
+ """The resource type, always benchmark"""
+
+ provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/benchmark_list_response.py b/src/llama_stack_client/types/benchmark_list_response.py
new file mode 100644
index 00000000..b2e8ad2b
--- /dev/null
+++ b/src/llama_stack_client/types/benchmark_list_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .benchmark import Benchmark
+
+__all__ = ["BenchmarkListResponse"]
+
+BenchmarkListResponse: TypeAlias = List[Benchmark]
diff --git a/src/llama_stack_client/types/benchmark_register_params.py b/src/llama_stack_client/types/benchmark_register_params.py
new file mode 100644
index 00000000..322e2da8
--- /dev/null
+++ b/src/llama_stack_client/types/benchmark_register_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["BenchmarkRegisterParams"]
+
+
+class BenchmarkRegisterParams(TypedDict, total=False):
+ benchmark_id: Required[str]
+ """The ID of the benchmark to register."""
+
+ dataset_id: Required[str]
+ """The ID of the dataset to use for the benchmark."""
+
+ scoring_functions: Required[SequenceNotStr[str]]
+ """The scoring functions to use for the benchmark."""
+
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """The metadata to use for the benchmark."""
+
+ provider_benchmark_id: str
+ """The ID of the provider benchmark to use for the benchmark."""
+
+ provider_id: str
+ """The ID of the provider to use for the benchmark."""
diff --git a/src/llama_stack_client/types/dataset_appendrows_params.py b/src/llama_stack_client/types/dataset_appendrows_params.py
new file mode 100644
index 00000000..2e96e124
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_appendrows_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DatasetAppendrowsParams"]
+
+
+class DatasetAppendrowsParams(TypedDict, total=False):
+ rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]]
+ """The rows to append to the dataset."""
diff --git a/src/llama_stack_client/types/dataset_iterrows_params.py b/src/llama_stack_client/types/dataset_iterrows_params.py
new file mode 100644
index 00000000..99065312
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_iterrows_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["DatasetIterrowsParams"]
+
+
+class DatasetIterrowsParams(TypedDict, total=False):
+ limit: int
+ """The number of rows to get."""
+
+ start_index: int
+ """Index into dataset for the first row to get. Get all rows if None."""
diff --git a/src/llama_stack_client/types/dataset_iterrows_response.py b/src/llama_stack_client/types/dataset_iterrows_response.py
new file mode 100644
index 00000000..8681b018
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_iterrows_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+
+from .._models import BaseModel
+
+__all__ = ["DatasetIterrowsResponse"]
+
+
+class DatasetIterrowsResponse(BaseModel):
+ data: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
+ """The list of items for the current page"""
+
+ has_more: bool
+ """Whether there are more items available after this set"""
+
+ url: Optional[str] = None
+ """The URL for accessing this list"""
diff --git a/src/llama_stack_client/types/dataset_list_response.py b/src/llama_stack_client/types/dataset_list_response.py
new file mode 100644
index 00000000..7080e589
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_list_response.py
@@ -0,0 +1,66 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+ "DatasetListResponse",
+ "DatasetListResponseItem",
+ "DatasetListResponseItemSource",
+ "DatasetListResponseItemSourceUriDataSource",
+ "DatasetListResponseItemSourceRowsDataSource",
+]
+
+
+class DatasetListResponseItemSourceUriDataSource(BaseModel):
+ type: Literal["uri"]
+
+ uri: str
+ """The dataset can be obtained from a URI.
+
+ E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
+ "data:csv;base64,{base64_content}"
+ """
+
+
+class DatasetListResponseItemSourceRowsDataSource(BaseModel):
+ rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
+ """The dataset is stored in rows.
+
+ E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
+ "assistant", "content": "Hello, world!"}]} ]
+ """
+
+ type: Literal["rows"]
+
+
+DatasetListResponseItemSource: TypeAlias = Annotated[
+ Union[DatasetListResponseItemSourceUriDataSource, DatasetListResponseItemSourceRowsDataSource],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DatasetListResponseItem(BaseModel):
+ identifier: str
+
+ metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """Additional metadata for the dataset"""
+
+ provider_id: str
+
+ purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ """Purpose of the dataset indicating its intended use"""
+
+ source: DatasetListResponseItemSource
+ """Data source configuration for the dataset"""
+
+ type: Literal["dataset"]
+ """Type of resource, always 'dataset' for datasets"""
+
+ provider_resource_id: Optional[str] = None
+
+
+DatasetListResponse: TypeAlias = List[DatasetListResponseItem]
diff --git a/src/llama_stack_client/types/dataset_register_params.py b/src/llama_stack_client/types/dataset_register_params.py
new file mode 100644
index 00000000..6fd5db3f
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_register_params.py
@@ -0,0 +1,69 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["DatasetRegisterParams", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
+
+
+class DatasetRegisterParams(TypedDict, total=False):
+ purpose: Required[Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]]
+ """The purpose of the dataset.
+
+ One of: - "post-training/messages": The dataset contains a messages column with
+ list of messages for post-training. { "messages": [ {"role": "user", "content":
+ "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] } -
+ "eval/question-answer": The dataset contains a question column and an answer
+ column for evaluation. { "question": "What is the capital of France?", "answer":
+ "Paris" } - "eval/messages-answer": The dataset contains a messages column with
+ list of messages and an answer column for evaluation. { "messages": [ {"role":
+ "user", "content": "Hello, my name is John Doe."}, {"role": "assistant",
+ "content": "Hello, John Doe. How can I help you today?"}, {"role": "user",
+ "content": "What's my name?"}, ], "answer": "John Doe" }
+ """
+
+ source: Required[Source]
+ """The data source of the dataset.
+
+ Ensure that the data source schema is compatible with the purpose of the
+ dataset. Examples: - { "type": "uri", "uri":
+ "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri":
+ "lsfs://mydata.jsonl" } - { "type": "uri", "uri":
+ "data:csv;base64,{base64_content}" } - { "type": "uri", "uri":
+ "huggingface://llamastack/simpleqa?split=train" } - { "type": "rows", "rows": [
+ { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role":
+ "assistant", "content": "Hello, world!"}, ] } ] }
+ """
+
+ dataset_id: str
+ """The ID of the dataset. If not provided, an ID will be generated."""
+
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """The metadata for the dataset. - E.g. {"description": "My dataset"}."""
+
+
+class SourceUriDataSource(TypedDict, total=False):
+ type: Required[Literal["uri"]]
+
+ uri: Required[str]
+ """The dataset can be obtained from a URI.
+
+ E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
+ "data:csv;base64,{base64_content}"
+ """
+
+
+class SourceRowsDataSource(TypedDict, total=False):
+ rows: Required[Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]]
+ """The dataset is stored in rows.
+
+ E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
+ "assistant", "content": "Hello, world!"}]} ]
+ """
+
+ type: Required[Literal["rows"]]
+
+
+Source: TypeAlias = Union[SourceUriDataSource, SourceRowsDataSource]
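A sketch of registering a dataset from a URI source, assuming the companion method is `client.datasets.register` (the URI is taken from the docstring examples above):

    dataset = client.datasets.register(
        purpose="eval/question-answer",
        source={"type": "uri", "uri": "https://mywebsite.com/mydata.jsonl"},
        dataset_id="my-eval-dataset",            # optional; generated if omitted
        metadata={"description": "QA eval set"},
    )
    print(dataset.identifier)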
diff --git a/src/llama_stack_client/types/dataset_register_response.py b/src/llama_stack_client/types/dataset_register_response.py
new file mode 100644
index 00000000..8da590b8
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_register_response.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = ["DatasetRegisterResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
+
+
+class SourceUriDataSource(BaseModel):
+ type: Literal["uri"]
+
+ uri: str
+ """The dataset can be obtained from a URI.
+
+ E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
+ "data:csv;base64,{base64_content}"
+ """
+
+
+class SourceRowsDataSource(BaseModel):
+ rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
+ """The dataset is stored in rows.
+
+ E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
+ "assistant", "content": "Hello, world!"}]} ]
+ """
+
+ type: Literal["rows"]
+
+
+Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
+
+
+class DatasetRegisterResponse(BaseModel):
+ identifier: str
+
+ metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """Additional metadata for the dataset"""
+
+ provider_id: str
+
+ purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ """Purpose of the dataset indicating its intended use"""
+
+ source: Source
+ """Data source configuration for the dataset"""
+
+ type: Literal["dataset"]
+ """Type of resource, always 'dataset' for datasets"""
+
+ provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/dataset_retrieve_response.py b/src/llama_stack_client/types/dataset_retrieve_response.py
new file mode 100644
index 00000000..6cda0a42
--- /dev/null
+++ b/src/llama_stack_client/types/dataset_retrieve_response.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = ["DatasetRetrieveResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
+
+
+class SourceUriDataSource(BaseModel):
+ type: Literal["uri"]
+
+ uri: str
+ """The dataset can be obtained from a URI.
+
+ E.g. - "https://mywebsite.com/mydata.jsonl" - "lsfs://mydata.jsonl" -
+ "data:csv;base64,{base64_content}"
+ """
+
+
+class SourceRowsDataSource(BaseModel):
+ rows: List[Dict[str, Union[bool, float, str, List[object], object, None]]]
+ """The dataset is stored in rows.
+
+ E.g. - [ {"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
+ "assistant", "content": "Hello, world!"}]} ]
+ """
+
+ type: Literal["rows"]
+
+
+Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
+
+
+class DatasetRetrieveResponse(BaseModel):
+ identifier: str
+
+ metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
+ """Additional metadata for the dataset"""
+
+ provider_id: str
+
+ purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ """Purpose of the dataset indicating its intended use"""
+
+ source: Source
+ """Data source configuration for the dataset"""
+
+ type: Literal["dataset"]
+ """Type of resource, always 'dataset' for datasets"""
+
+ provider_resource_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/event_param.py b/src/llama_stack_client/types/event_param.py
deleted file mode 100644
index b26f2916..00000000
--- a/src/llama_stack_client/types/event_param.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union
-from datetime import datetime
-from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = [
- "EventParam",
- "UnstructuredLogEvent",
- "MetricEvent",
- "StructuredLogEvent",
- "StructuredLogEventPayload",
- "StructuredLogEventPayloadSpanStartPayload",
- "StructuredLogEventPayloadSpanEndPayload",
-]
-
-
-class UnstructuredLogEvent(TypedDict, total=False):
- message: Required[str]
- """The log message text"""
-
- severity: Required[Literal["verbose", "debug", "info", "warn", "error", "critical"]]
- """The severity level of the log message"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["unstructured_log"]]
- """Event type identifier set to UNSTRUCTURED_LOG"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-class MetricEvent(TypedDict, total=False):
- metric: Required[str]
- """The name of the metric being measured"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["metric"]]
- """Event type identifier set to METRIC"""
-
- unit: Required[str]
- """The unit of measurement for the metric value"""
-
- value: Required[float]
- """The numeric value of the metric measurement"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-class StructuredLogEventPayloadSpanStartPayload(TypedDict, total=False):
- name: Required[str]
- """Human-readable name describing the operation this span represents"""
-
- type: Required[Literal["span_start"]]
- """Payload type identifier set to SPAN_START"""
-
- parent_span_id: str
- """(Optional) Unique identifier for the parent span, if this is a child span"""
-
-
-class StructuredLogEventPayloadSpanEndPayload(TypedDict, total=False):
- status: Required[Literal["ok", "error"]]
- """The final status of the span indicating success or failure"""
-
- type: Required[Literal["span_end"]]
- """Payload type identifier set to SPAN_END"""
-
-
-StructuredLogEventPayload: TypeAlias = Union[
- StructuredLogEventPayloadSpanStartPayload, StructuredLogEventPayloadSpanEndPayload
-]
-
-
-class StructuredLogEvent(TypedDict, total=False):
- payload: Required[StructuredLogEventPayload]
- """The structured payload data for the log event"""
-
- span_id: Required[str]
- """Unique identifier for the span this event belongs to"""
-
- timestamp: Required[Annotated[Union[str, datetime], PropertyInfo(format="iso8601")]]
- """Timestamp when the event occurred"""
-
- trace_id: Required[str]
- """Unique identifier for the trace this event belongs to"""
-
- type: Required[Literal["structured_log"]]
- """Event type identifier set to STRUCTURED_LOG"""
-
- attributes: Dict[str, Union[str, float, bool, None]]
- """(Optional) Key-value pairs containing additional metadata about the event"""
-
-
-EventParam: TypeAlias = Union[UnstructuredLogEvent, MetricEvent, StructuredLogEvent]
diff --git a/src/llama_stack_client/types/list_benchmarks_response.py b/src/llama_stack_client/types/list_benchmarks_response.py
new file mode 100644
index 00000000..f265f130
--- /dev/null
+++ b/src/llama_stack_client/types/list_benchmarks_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+from .benchmark_list_response import BenchmarkListResponse
+
+__all__ = ["ListBenchmarksResponse"]
+
+
+class ListBenchmarksResponse(BaseModel):
+ data: BenchmarkListResponse
diff --git a/src/llama_stack_client/types/list_datasets_response.py b/src/llama_stack_client/types/list_datasets_response.py
new file mode 100644
index 00000000..21c4b72a
--- /dev/null
+++ b/src/llama_stack_client/types/list_datasets_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+from .dataset_list_response import DatasetListResponse
+
+__all__ = ["ListDatasetsResponse"]
+
+
+class ListDatasetsResponse(BaseModel):
+ data: DatasetListResponse
+ """List of datasets"""
diff --git a/src/llama_stack_client/types/query_condition_param.py b/src/llama_stack_client/types/query_condition_param.py
new file mode 100644
index 00000000..59def1b4
--- /dev/null
+++ b/src/llama_stack_client/types/query_condition_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["QueryConditionParam"]
+
+
+class QueryConditionParam(TypedDict, total=False):
+ key: Required[str]
+ """The attribute key to filter on"""
+
+ op: Required[Literal["eq", "ne", "gt", "lt"]]
+ """The comparison operator to apply"""
+
+ value: Required[Union[bool, float, str, Iterable[object], object, None]]
+ """The value to compare against"""
diff --git a/src/llama_stack_client/types/query_spans_response.py b/src/llama_stack_client/types/query_spans_response.py
new file mode 100644
index 00000000..a20c9b92
--- /dev/null
+++ b/src/llama_stack_client/types/query_spans_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+from .telemetry_query_spans_response import TelemetryQuerySpansResponse
+
+__all__ = ["QuerySpansResponse"]
+
+
+class QuerySpansResponse(BaseModel):
+ data: TelemetryQuerySpansResponse
+ """List of spans matching the query criteria"""
diff --git a/src/llama_stack_client/types/span_with_status.py b/src/llama_stack_client/types/span_with_status.py
new file mode 100644
index 00000000..04d124bd
--- /dev/null
+++ b/src/llama_stack_client/types/span_with_status.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["SpanWithStatus"]
+
+
+class SpanWithStatus(BaseModel):
+ name: str
+ """Human-readable name describing the operation this span represents"""
+
+ span_id: str
+ """Unique identifier for the span"""
+
+ start_time: datetime
+ """Timestamp when the operation began"""
+
+ trace_id: str
+ """Unique identifier for the trace this span belongs to"""
+
+ attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Key-value pairs containing additional metadata about the span"""
+
+ end_time: Optional[datetime] = None
+ """(Optional) Timestamp when the operation finished, if completed"""
+
+ parent_span_id: Optional[str] = None
+ """(Optional) Unique identifier for the parent span, if this is a child span"""
+
+ status: Optional[Literal["ok", "error"]] = None
+ """(Optional) The current status of the span"""
diff --git a/src/llama_stack_client/types/telemetry_get_span_response.py b/src/llama_stack_client/types/telemetry_get_span_response.py
new file mode 100644
index 00000000..6826d4d0
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_get_span_response.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from datetime import datetime
+
+from .._models import BaseModel
+
+__all__ = ["TelemetryGetSpanResponse"]
+
+
+class TelemetryGetSpanResponse(BaseModel):
+ name: str
+ """Human-readable name describing the operation this span represents"""
+
+ span_id: str
+ """Unique identifier for the span"""
+
+ start_time: datetime
+ """Timestamp when the operation began"""
+
+ trace_id: str
+ """Unique identifier for the trace this span belongs to"""
+
+ attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Key-value pairs containing additional metadata about the span"""
+
+ end_time: Optional[datetime] = None
+ """(Optional) Timestamp when the operation finished, if completed"""
+
+ parent_span_id: Optional[str] = None
+ """(Optional) Unique identifier for the parent span, if this is a child span"""
diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_params.py b/src/llama_stack_client/types/telemetry_get_span_tree_params.py
new file mode 100644
index 00000000..92dc7e1d
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_get_span_tree_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["TelemetryGetSpanTreeParams"]
+
+
+class TelemetryGetSpanTreeParams(TypedDict, total=False):
+ attributes_to_return: SequenceNotStr[str]
+ """The attributes to return in the tree."""
+
+ max_depth: int
+ """The maximum depth of the tree."""
diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_response.py b/src/llama_stack_client/types/telemetry_get_span_tree_response.py
new file mode 100644
index 00000000..b72e6158
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_get_span_tree_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+from .span_with_status import SpanWithStatus
+
+__all__ = ["TelemetryGetSpanTreeResponse"]
+
+TelemetryGetSpanTreeResponse: TypeAlias = Dict[str, SpanWithStatus]
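Since the tree comes back flat, callers can rebuild parent/child structure from `parent_span_id`; a sketch (span ID hypothetical):

    from collections import defaultdict

    tree = client.telemetry.get_span_tree(span_id="abc123")
    children = defaultdict(list)
    for sid, span in tree.items():
        if span.parent_span_id:
            children[span.parent_span_id].append(sid)
    # children now maps each span ID to its direct child span IDs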
diff --git a/src/llama_stack_client/types/telemetry_log_event_params.py b/src/llama_stack_client/types/telemetry_log_event_params.py
deleted file mode 100644
index 246b6526..00000000
--- a/src/llama_stack_client/types/telemetry_log_event_params.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .event_param import EventParam
-
-__all__ = ["TelemetryLogEventParams"]
-
-
-class TelemetryLogEventParams(TypedDict, total=False):
- event: Required[EventParam]
- """The event to log."""
-
- ttl_seconds: Required[int]
- """The time to live of the event."""
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_params.py b/src/llama_stack_client/types/telemetry_query_metrics_params.py
new file mode 100644
index 00000000..adf3f720
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_metrics_params.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["TelemetryQueryMetricsParams", "LabelMatcher"]
+
+
+class TelemetryQueryMetricsParams(TypedDict, total=False):
+ query_type: Required[Literal["range", "instant"]]
+ """The type of query to perform."""
+
+ start_time: Required[int]
+ """The start time of the metric to query."""
+
+ end_time: int
+ """The end time of the metric to query."""
+
+ granularity: str
+ """The granularity of the metric to query."""
+
+ label_matchers: Iterable[LabelMatcher]
+ """The label matchers to apply to the metric."""
+
+
+class LabelMatcher(TypedDict, total=False):
+ name: Required[str]
+ """The name of the label to match"""
+
+ operator: Required[Literal["=", "!=", "=~", "!~"]]
+ """The comparison operator to use for matching"""
+
+ value: Required[str]
+ """The value to match against"""
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_response.py b/src/llama_stack_client/types/telemetry_query_metrics_response.py
new file mode 100644
index 00000000..e9f4264e
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_metrics_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .._models import BaseModel
+
+__all__ = [
+ "TelemetryQueryMetricsResponse",
+ "TelemetryQueryMetricsResponseItem",
+ "TelemetryQueryMetricsResponseItemLabel",
+ "TelemetryQueryMetricsResponseItemValue",
+]
+
+
+class TelemetryQueryMetricsResponseItemLabel(BaseModel):
+ name: str
+ """The name of the label"""
+
+ value: str
+ """The value of the label"""
+
+
+class TelemetryQueryMetricsResponseItemValue(BaseModel):
+ timestamp: int
+ """Unix timestamp when the metric value was recorded"""
+
+ unit: str
+
+ value: float
+ """The numeric value of the metric at this timestamp"""
+
+
+class TelemetryQueryMetricsResponseItem(BaseModel):
+ labels: List[TelemetryQueryMetricsResponseItemLabel]
+ """List of labels associated with this metric series"""
+
+ metric: str
+ """The name of the metric"""
+
+ values: List[TelemetryQueryMetricsResponseItemValue]
+ """List of data points in chronological order"""
+
+
+TelemetryQueryMetricsResponse: TypeAlias = List[TelemetryQueryMetricsResponseItem]
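Reading a range-query result shaped by these models; a sketch (metric name hypothetical):

    series_list = client.telemetry.query_metrics(
        "prompt_tokens",
        query_type="range",
        start_time=0,
    )
    for series in series_list:
        labels = {label.name: label.value for label in series.labels}
        if series.values:
            print(series.metric, labels, series.values[-1].value)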
diff --git a/src/llama_stack_client/types/telemetry_query_spans_params.py b/src/llama_stack_client/types/telemetry_query_spans_params.py
new file mode 100644
index 00000000..452439e3
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_spans_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+from .query_condition_param import QueryConditionParam
+
+__all__ = ["TelemetryQuerySpansParams"]
+
+
+class TelemetryQuerySpansParams(TypedDict, total=False):
+ attribute_filters: Required[Iterable[QueryConditionParam]]
+ """The attribute filters to apply to the spans."""
+
+ attributes_to_return: Required[SequenceNotStr[str]]
+ """The attributes to return in the spans."""
+
+ max_depth: int
+ """The maximum depth of the tree."""
diff --git a/src/llama_stack_client/types/telemetry_query_spans_response.py b/src/llama_stack_client/types/telemetry_query_spans_response.py
new file mode 100644
index 00000000..49eaeb38
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_spans_response.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from datetime import datetime
+from typing_extensions import TypeAlias
+
+from .._models import BaseModel
+
+__all__ = ["TelemetryQuerySpansResponse", "TelemetryQuerySpansResponseItem"]
+
+
+class TelemetryQuerySpansResponseItem(BaseModel):
+ name: str
+ """Human-readable name describing the operation this span represents"""
+
+ span_id: str
+ """Unique identifier for the span"""
+
+ start_time: datetime
+ """Timestamp when the operation began"""
+
+ trace_id: str
+ """Unique identifier for the trace this span belongs to"""
+
+ attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Key-value pairs containing additional metadata about the span"""
+
+ end_time: Optional[datetime] = None
+ """(Optional) Timestamp when the operation finished, if completed"""
+
+ parent_span_id: Optional[str] = None
+ """(Optional) Unique identifier for the parent span, if this is a child span"""
+
+
+TelemetryQuerySpansResponse: TypeAlias = List[TelemetryQuerySpansResponseItem]
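
Because each item carries a span_id and an optional parent_span_id, callers can rebuild the span tree themselves. A sketch, assuming `spans` holds the result of the query_spans call above:

```python
from collections import defaultdict

# Index spans by parent_span_id; spans whose parent_span_id is None are
# trace roots, so the root set falls out of the same index.
children = defaultdict(list)
for span in spans:
    children[span.parent_span_id].append(span)

for root in children[None]:
    print(root.trace_id, root.span_id, root.name, root.start_time)
```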
diff --git a/src/llama_stack_client/types/telemetry_query_traces_params.py b/src/llama_stack_client/types/telemetry_query_traces_params.py
new file mode 100644
index 00000000..2a6eb334
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_traces_params.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+from .._types import SequenceNotStr
+from .query_condition_param import QueryConditionParam
+
+__all__ = ["TelemetryQueryTracesParams"]
+
+
+class TelemetryQueryTracesParams(TypedDict, total=False):
+ attribute_filters: Iterable[QueryConditionParam]
+ """The attribute filters to apply to the traces."""
+
+ limit: int
+ """The limit of traces to return."""
+
+ offset: int
+ """The offset of the traces to return."""
+
+ order_by: SequenceNotStr[str]
+    """The fields to order the returned traces by."""
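
All four params are optional, so limit/offset give a simple paging loop over the `List[Trace]` alias defined in the next file. A sketch that assumes an empty page signals exhaustion (the endpoint exposes no cursor):

```python
offset = 0
while True:
    page = client.telemetry.query_traces(limit=100, offset=offset)
    if not page:
        break
    for trace in page:
        print(trace.trace_id, trace.root_span_id, trace.start_time)
    offset += len(page)
```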
diff --git a/src/llama_stack_client/types/telemetry_query_traces_response.py b/src/llama_stack_client/types/telemetry_query_traces_response.py
new file mode 100644
index 00000000..01a1365d
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_traces_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .trace import Trace
+
+__all__ = ["TelemetryQueryTracesResponse"]
+
+TelemetryQueryTracesResponse: TypeAlias = List[Trace]
diff --git a/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py b/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
new file mode 100644
index 00000000..f0bdebbd
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+from .query_condition_param import QueryConditionParam
+
+__all__ = ["TelemetrySaveSpansToDatasetParams"]
+
+
+class TelemetrySaveSpansToDatasetParams(TypedDict, total=False):
+ attribute_filters: Required[Iterable[QueryConditionParam]]
+ """The attribute filters to apply to the spans."""
+
+ attributes_to_save: Required[SequenceNotStr[str]]
+ """The attributes to save to the dataset."""
+
+ dataset_id: Required[str]
+ """The ID of the dataset to save the spans to."""
+
+ max_depth: int
+ """The maximum depth of the tree."""
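
A sketch of the corresponding call; it returns None on success, and the dataset_id placeholder stands in for a previously registered dataset:

```python
# Persist spans matching the filters into the given dataset, saving only
# the listed attributes. max_depth is optional.
client.telemetry.save_spans_to_dataset(
    attribute_filters=[{"key": "key", "op": "eq", "value": True}],
    attributes_to_save=["string"],
    dataset_id="dataset_id",
    max_depth=0,
)
```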
diff --git a/src/llama_stack_client/types/trace.py b/src/llama_stack_client/types/trace.py
new file mode 100644
index 00000000..0657d616
--- /dev/null
+++ b/src/llama_stack_client/types/trace.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from .._models import BaseModel
+
+__all__ = ["Trace"]
+
+
+class Trace(BaseModel):
+ root_span_id: str
+ """Unique identifier for the root span that started this trace"""
+
+ start_time: datetime
+ """Timestamp when the trace began"""
+
+ trace_id: str
+ """Unique identifier for the trace"""
+
+ end_time: Optional[datetime] = None
+ """(Optional) Timestamp when the trace finished, if completed"""
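
Since end_time is optional, a duration is only well defined for completed traces. A sketch using get_trace, which the updated telemetry tests below exercise:

```python
trace = client.telemetry.get_trace("trace_id")  # placeholder trace ID

# end_time is None while the trace is still in flight, so guard before
# subtracting; the difference of two datetimes is a datetime.timedelta.
if trace.end_time is not None:
    duration = trace.end_time - trace.start_time
    print(f"trace {trace.trace_id} took {duration.total_seconds():.3f}s")
```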
diff --git a/tests/api_resources/test_benchmarks.py b/tests/api_resources/test_benchmarks.py
new file mode 100644
index 00000000..97d3d5c9
--- /dev/null
+++ b/tests/api_resources/test_benchmarks.py
@@ -0,0 +1,248 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
+from llama_stack_client.types import Benchmark, BenchmarkListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBenchmarks:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ benchmark = client.benchmarks.retrieve(
+ "benchmark_id",
+ )
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ response = client.benchmarks.with_raw_response.retrieve(
+ "benchmark_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ with client.benchmarks.with_streaming_response.retrieve(
+ "benchmark_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
+ client.benchmarks.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: LlamaStackClient) -> None:
+ benchmark = client.benchmarks.list()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ response = client.benchmarks.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ with client.benchmarks.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_register(self, client: LlamaStackClient) -> None:
+ benchmark = client.benchmarks.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ )
+ assert benchmark is None
+
+ @parametrize
+ def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
+ benchmark = client.benchmarks.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ metadata={"foo": True},
+ provider_benchmark_id="provider_benchmark_id",
+ provider_id="provider_id",
+ )
+ assert benchmark is None
+
+ @parametrize
+ def test_raw_response_register(self, client: LlamaStackClient) -> None:
+ response = client.benchmarks.with_raw_response.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert benchmark is None
+
+ @parametrize
+ def test_streaming_response_register(self, client: LlamaStackClient) -> None:
+ with client.benchmarks.with_streaming_response.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert benchmark is None
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncBenchmarks:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ benchmark = await async_client.benchmarks.retrieve(
+ "benchmark_id",
+ )
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.benchmarks.with_raw_response.retrieve(
+ "benchmark_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.benchmarks.with_streaming_response.retrieve(
+ "benchmark_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(Benchmark, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
+ await async_client.benchmarks.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ benchmark = await async_client.benchmarks.list()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.benchmarks.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.benchmarks.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
+ benchmark = await async_client.benchmarks.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ )
+ assert benchmark is None
+
+ @parametrize
+ async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ benchmark = await async_client.benchmarks.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ metadata={"foo": True},
+ provider_benchmark_id="provider_benchmark_id",
+ provider_id="provider_id",
+ )
+ assert benchmark is None
+
+ @parametrize
+ async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.benchmarks.with_raw_response.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert benchmark is None
+
+ @parametrize
+ async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.benchmarks.with_streaming_response.register(
+ benchmark_id="benchmark_id",
+ dataset_id="dataset_id",
+ scoring_functions=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert benchmark is None
+
+ assert cast(Any, response.is_closed) is True
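
Outside the test harness, the same benchmark surface looks like the following sketch, reusing the client construction from the telemetry snippet earlier; every string value is a placeholder, and register returns None just as the tests assert:

```python
# benchmark_id, dataset_id, and scoring_functions are required;
# metadata, provider_benchmark_id, and provider_id are optional.
client.benchmarks.register(
    benchmark_id="benchmark_id",
    dataset_id="dataset_id",
    scoring_functions=["string"],
    metadata={"foo": True},
)

benchmark = client.benchmarks.retrieve("benchmark_id")
all_benchmarks = client.benchmarks.list()
```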
diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py
new file mode 100644
index 00000000..eee1de8c
--- /dev/null
+++ b/tests/api_resources/test_datasets.py
@@ -0,0 +1,521 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
+from llama_stack_client.types import (
+ DatasetListResponse,
+ DatasetIterrowsResponse,
+ DatasetRegisterResponse,
+ DatasetRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDatasets:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.retrieve(
+ "dataset_id",
+ )
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.retrieve(
+ "dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.retrieve(
+ "dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ client.datasets.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.list()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_appendrows(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ )
+ assert dataset is None
+
+ @parametrize
+ def test_raw_response_appendrows(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert dataset is None
+
+ @parametrize
+ def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert dataset is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_appendrows(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ client.datasets.with_raw_response.appendrows(
+ dataset_id="",
+ rows=[{"foo": True}],
+ )
+
+ @parametrize
+ def test_method_iterrows(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.iterrows(
+ dataset_id="dataset_id",
+ )
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_method_iterrows_with_all_params(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.iterrows(
+ dataset_id="dataset_id",
+ limit=0,
+ start_index=0,
+ )
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_raw_response_iterrows(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.iterrows(
+ dataset_id="dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_streaming_response_iterrows(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.iterrows(
+ dataset_id="dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_iterrows(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ client.datasets.with_raw_response.iterrows(
+ dataset_id="",
+ )
+
+ @parametrize
+ def test_method_register(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ )
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ dataset_id="dataset_id",
+ metadata={"foo": True},
+ )
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_raw_response_register(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ def test_streaming_response_register(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_unregister(self, client: LlamaStackClient) -> None:
+ dataset = client.datasets.unregister(
+ "dataset_id",
+ )
+ assert dataset is None
+
+ @parametrize
+ def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
+ response = client.datasets.with_raw_response.unregister(
+ "dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = response.parse()
+ assert dataset is None
+
+ @parametrize
+ def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
+ with client.datasets.with_streaming_response.unregister(
+ "dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = response.parse()
+ assert dataset is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_unregister(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ client.datasets.with_raw_response.unregister(
+ "",
+ )
+
+
+class TestAsyncDatasets:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.retrieve(
+ "dataset_id",
+ )
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.retrieve(
+ "dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.retrieve(
+ "dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ await async_client.datasets.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.list()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert_matches_type(DatasetListResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ )
+ assert dataset is None
+
+ @parametrize
+ async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert dataset is None
+
+ @parametrize
+ async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.appendrows(
+ dataset_id="dataset_id",
+ rows=[{"foo": True}],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert dataset is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ await async_client.datasets.with_raw_response.appendrows(
+ dataset_id="",
+ rows=[{"foo": True}],
+ )
+
+ @parametrize
+ async def test_method_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.iterrows(
+ dataset_id="dataset_id",
+ )
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_method_iterrows_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.iterrows(
+ dataset_id="dataset_id",
+ limit=0,
+ start_index=0,
+ )
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_raw_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.iterrows(
+ dataset_id="dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.iterrows(
+ dataset_id="dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ await async_client.datasets.with_raw_response.iterrows(
+ dataset_id="",
+ )
+
+ @parametrize
+ async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ )
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ dataset_id="dataset_id",
+ metadata={"foo": True},
+ )
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.register(
+ purpose="post-training/messages",
+ source={
+ "type": "uri",
+ "uri": "uri",
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+ dataset = await async_client.datasets.unregister(
+ "dataset_id",
+ )
+ assert dataset is None
+
+ @parametrize
+ async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.datasets.with_raw_response.unregister(
+ "dataset_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ dataset = await response.parse()
+ assert dataset is None
+
+ @parametrize
+ async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.datasets.with_streaming_response.unregister(
+ "dataset_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ dataset = await response.parse()
+ assert dataset is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+ await async_client.datasets.with_raw_response.unregister(
+ "",
+ )
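
The dataset lifecycle these tests cover, condensed into one sketch; values are placeholders, and the limit shown is a hypothetical page size rather than the tests' sentinel 0:

```python
# Register a URI-backed dataset. dataset_id is optional at registration;
# the placeholder below stands in for whichever ID the dataset ends up with.
registered = client.datasets.register(
    purpose="post-training/messages",
    source={"type": "uri", "uri": "uri"},
)

client.datasets.appendrows(
    dataset_id="dataset_id",
    rows=[{"foo": True}],
)

page = client.datasets.iterrows(
    dataset_id="dataset_id",
    limit=100,  # hypothetical page size
    start_index=0,
)
```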
diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py
index 6191757b..07075c52 100644
--- a/tests/api_resources/test_telemetry.py
+++ b/tests/api_resources/test_telemetry.py
@@ -7,8 +7,16 @@
import pytest
+from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client._utils import parse_datetime
+from llama_stack_client.types import (
+ Trace,
+ TelemetryGetSpanResponse,
+ TelemetryQuerySpansResponse,
+ TelemetryGetSpanTreeResponse,
+ TelemetryQueryTracesResponse,
+ TelemetryQueryMetricsResponse,
+)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -17,48 +25,366 @@ class TestTelemetry:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_log_event(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ def test_method_get_span(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ )
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ @parametrize
+ def test_raw_response_get_span(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ @parametrize
+ def test_streaming_response_get_span(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_get_span(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
+ client.telemetry.with_raw_response.get_span(
+ span_id="span_id",
+ trace_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
+ client.telemetry.with_raw_response.get_span(
+ span_id="",
+ trace_id="trace_id",
+ )
+
+ @parametrize
+ def test_method_get_span_tree(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.get_span_tree(
+ span_id="span_id",
+ )
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ def test_method_get_span_tree_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.get_span_tree(
+ span_id="span_id",
+ attributes_to_return=["string"],
+ max_depth=0,
+ )
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ def test_raw_response_get_span_tree(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.get_span_tree(
+ span_id="span_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ def test_streaming_response_get_span_tree(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.get_span_tree(
+ span_id="span_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_get_span_tree(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
+ client.telemetry.with_raw_response.get_span_tree(
+ span_id="",
+ )
+
+ @parametrize
+ def test_method_get_trace(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.get_trace(
+ "trace_id",
+ )
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ @parametrize
+ def test_raw_response_get_trace(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.get_trace(
+ "trace_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ @parametrize
+ def test_streaming_response_get_trace(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.get_trace(
+ "trace_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_get_trace(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
+ client.telemetry.with_raw_response.get_trace(
+ "",
+ )
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_metrics(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_metrics_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ end_time=0,
+ granularity="granularity",
+ label_matchers=[
+ {
+ "name": "name",
+ "operator": "=",
+ "value": "value",
+ }
+ ],
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_raw_response_query_metrics(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_streaming_response_query_metrics(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_path_params_query_metrics(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
+ client.telemetry.with_raw_response.query_metrics(
+ metric_name="",
+ query_type="range",
+ start_time=0,
+ )
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_spans(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ )
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_spans_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ max_depth=0,
+ )
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_raw_response_query_spans(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_streaming_response_query_spans(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_traces(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_traces()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_traces_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_traces(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ limit=0,
+ offset=0,
+ order_by=["string"],
+ )
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_raw_response_query_traces(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.query_traces()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_streaming_response_query_traces(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.query_traces() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
)
assert telemetry is None
@parametrize
- def test_method_log_event_with_all_params(self, client: LlamaStackClient) -> None:
- telemetry = client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- "attributes": {"foo": "string"},
- },
- ttl_seconds=0,
+ def test_method_save_spans_to_dataset_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
+ max_depth=0,
)
assert telemetry is None
@parametrize
- def test_raw_response_log_event(self, client: LlamaStackClient) -> None:
- response = client.telemetry.with_raw_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ def test_raw_response_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
)
assert response.is_closed is True
@@ -67,17 +393,17 @@ def test_raw_response_log_event(self, client: LlamaStackClient) -> None:
assert telemetry is None
@parametrize
- def test_streaming_response_log_event(self, client: LlamaStackClient) -> None:
- with client.telemetry.with_streaming_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ def test_streaming_response_save_spans_to_dataset(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -94,48 +420,366 @@ class TestAsyncTelemetry:
)
@parametrize
- async def test_method_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ async def test_method_get_span(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ )
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ @parametrize
+ async def test_raw_response_get_span(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_get_span(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.get_span(
+ span_id="span_id",
+ trace_id="trace_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_get_span(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
+ await async_client.telemetry.with_raw_response.get_span(
+ span_id="span_id",
+ trace_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
+ await async_client.telemetry.with_raw_response.get_span(
+ span_id="",
+ trace_id="trace_id",
+ )
+
+ @parametrize
+ async def test_method_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.get_span_tree(
+ span_id="span_id",
+ )
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ async def test_method_get_span_tree_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.get_span_tree(
+ span_id="span_id",
+ attributes_to_return=["string"],
+ max_depth=0,
+ )
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ async def test_raw_response_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.get_span_tree(
+ span_id="span_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.get_span_tree(
+ span_id="span_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryGetSpanTreeResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"):
+ await async_client.telemetry.with_raw_response.get_span_tree(
+ span_id="",
+ )
+
+ @parametrize
+ async def test_method_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.get_trace(
+ "trace_id",
+ )
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ @parametrize
+ async def test_raw_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.get_trace(
+ "trace_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.get_trace(
+ "trace_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(Trace, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_get_trace(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"):
+ await async_client.telemetry.with_raw_response.get_trace(
+ "",
+ )
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_metrics_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ end_time=0,
+ granularity="granularity",
+ label_matchers=[
+ {
+ "name": "name",
+ "operator": "=",
+ "value": "value",
+ }
+ ],
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_raw_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_streaming_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_path_params_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
+ await async_client.telemetry.with_raw_response.query_metrics(
+ metric_name="",
+ query_type="range",
+ start_time=0,
+ )
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ )
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_spans_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ max_depth=0,
+ )
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_raw_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_streaming_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.query_spans(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_return=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_traces()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_traces_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_traces(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ limit=0,
+ offset=0,
+ order_by=["string"],
+ )
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_raw_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.query_traces()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_streaming_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.query_traces() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
)
assert telemetry is None
@parametrize
- async def test_method_log_event_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- telemetry = await async_client.telemetry.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- "attributes": {"foo": "string"},
- },
- ttl_seconds=0,
+ async def test_method_save_spans_to_dataset_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
+ max_depth=0,
)
assert telemetry is None
@parametrize
- async def test_raw_response_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.telemetry.with_raw_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ async def test_raw_response_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
)
assert response.is_closed is True
@@ -144,17 +788,17 @@ async def test_raw_response_log_event(self, async_client: AsyncLlamaStackClient)
assert telemetry is None
@parametrize
- async def test_streaming_response_log_event(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.telemetry.with_streaming_response.log_event(
- event={
- "message": "message",
- "severity": "verbose",
- "span_id": "span_id",
- "timestamp": parse_datetime("2019-12-27T18:11:19.117Z"),
- "trace_id": "trace_id",
- "type": "unstructured_log",
- },
- ttl_seconds=0,
+ async def test_streaming_response_save_spans_to_dataset(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.save_spans_to_dataset(
+ attribute_filters=[
+ {
+ "key": "key",
+ "op": "eq",
+ "value": True,
+ }
+ ],
+ attributes_to_save=["string"],
+ dataset_id="dataset_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
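For orientation, the telemetry tests above exercise three endpoints: span queries, trace queries, and saving spans into a dataset. A minimal synchronous sketch of the same calls, assuming a reachable Llama Stack server (the base URL and dataset_id below are illustrative placeholders, not part of this patch):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # assumed server location

    # Query spans whose attribute "key" equals True, projecting one attribute,
    # exactly as the tests above do.
    spans = client.telemetry.query_spans(
        attribute_filters=[{"key": "key", "op": "eq", "value": True}],
        attributes_to_return=["string"],
    )

    # Persist the matching spans into a dataset for later analysis.
    client.telemetry.save_spans_to_dataset(
        attribute_filters=[{"key": "key", "op": "eq", "value": True}],
        attributes_to_save=["string"],
        dataset_id="dataset_id",  # placeholder dataset identifier
    )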
From a41fdb1089f180f612e4fee2204217099c1dddd0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 20:08:30 +0000
Subject: [PATCH 6/9] fix(api): another fix to capture correct
responses.create() params
---
.stats.yml | 4 +-
api.md | 3 +-
.../resources/responses/responses.py | 300 ++++++--
src/llama_stack_client/types/__init__.py | 1 -
.../types/response_create_params.py | 448 +++++++++++-
.../types/response_create_response.py | 640 ------------------
tests/api_resources/test_responses.py | 233 +++++--
7 files changed, 849 insertions(+), 780 deletions(-)
delete mode 100644 src/llama_stack_client/types/response_create_response.py
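In short, this patch stops `responses.create()` from taking the list-endpoint parameters (`after`, `limit`, `order`) it had wrongly inherited and gives it the real creation parameters, returning a `ResponseObject` instead of the now-deleted `ResponseCreateResponse`. A minimal non-streaming call under the corrected signature, mirroring the updated tests (argument values are the tests' placeholders):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient()

    # input and model are now required; the result is a single ResponseObject.
    response = client.responses.create(
        input="string",
        model="model",
    )
    print(response.id, response.status)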
diff --git a/.stats.yml b/.stats.yml
index a88d26a5..5588dfb4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 108
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f26df77f0800baeaea40407776f6c1e618756037969411e29de209ce961655dd.yml
-openapi_spec_hash: e7c2329edc0f9f5aa1c78b6afb996e1c
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml
+openapi_spec_hash: cde1096a830f2081d68f858f020fd53f
config_hash: 8800bdff1a087b9d5211dda2a7b9f66f
diff --git a/api.md b/api.md
index 777cc6ff..3319e27c 100644
--- a/api.md
+++ b/api.md
@@ -78,7 +78,6 @@ Types:
from llama_stack_client.types import (
ResponseObject,
ResponseObjectStream,
- ResponseCreateResponse,
ResponseListResponse,
ResponseDeleteResponse,
)
@@ -86,7 +85,7 @@ from llama_stack_client.types import (
Methods:
-- client.responses.create(\*\*params) -> ResponseCreateResponse
+- client.responses.create(\*\*params) -> ResponseObject
- client.responses.retrieve(response_id) -> ResponseObject
- client.responses.list(\*\*params) -> SyncOpenAICursorPage[ResponseListResponse]
- client.responses.delete(response_id) -> ResponseDeleteResponse
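Taken together, the methods listed above support a create/retrieve/list/delete round trip. A hedged sketch, assuming a configured client; iterating the cursor page follows the usual Stainless pagination behavior:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient()

    created = client.responses.create(input="hello", model="model")
    fetched = client.responses.retrieve(created.id)

    # SyncOpenAICursorPage[ResponseListResponse] is iterable over its items.
    for item in client.responses.list():
        print(item.id)

    client.responses.delete(created.id)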
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 418001eb..16e38fd0 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -2,13 +2,14 @@
from __future__ import annotations
+from typing import Union, Iterable
from typing_extensions import Literal, overload
import httpx
from ...types import response_list_params, response_create_params
-from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -31,7 +32,6 @@
from ...types.response_object import ResponseObject
from ...types.response_list_response import ResponseListResponse
from ...types.response_object_stream import ResponseObjectStream
-from ...types.response_create_response import ResponseCreateResponse
from ...types.response_delete_response import ResponseDeleteResponse
__all__ = ["ResponsesResource", "AsyncResponsesResource"]
@@ -65,28 +65,39 @@ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse:
+ ) -> ResponseObject:
"""
- List all OpenAI responses.
+ Create a new OpenAI response.
Args:
- after: The ID of the last response to return.
+ input: Input message(s) to create the response.
- limit: The number of responses to return.
+ model: The underlying LLM used for completions.
- model: The model to filter responses by.
+ include: (Optional) Additional fields to include in the response.
- order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
extra_headers: Send extra headers
@@ -102,28 +113,39 @@ def create(
def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: Literal[True],
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse:
+ ) -> Stream[ResponseObjectStream]:
"""
- List all OpenAI responses.
+ Create a new OpenAI response.
Args:
- after: The ID of the last response to return.
+ input: Input message(s) to create the response.
- limit: The number of responses to return.
+ model: The underlying LLM used for completions.
- model: The model to filter responses by.
+ include: (Optional) Additional fields to include in the response.
- order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
extra_headers: Send extra headers
@@ -135,35 +157,100 @@ def create(
"""
...
+ @overload
def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: bool,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ResponseObject | Stream[ResponseObjectStream]:
+ """
+ Create a new OpenAI response.
+
+ Args:
+ input: Input message(s) to create the response.
+
+ model: The underlying LLM used for completions.
+
+ include: (Optional) Additional fields to include in the response.
+
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["input", "model"], ["input", "model", "stream"])
+ def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Literal[True] | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse | Stream[ResponseObjectStream]:
+ ) -> ResponseObject | Stream[ResponseObjectStream]:
return self._post(
"/v1/responses",
body=maybe_transform(
{
- "after": after,
- "limit": limit,
+ "input": input,
"model": model,
- "order": order,
+ "include": include,
+ "instructions": instructions,
+ "max_infer_iters": max_infer_iters,
+ "previous_response_id": previous_response_id,
+ "store": store,
+ "stream": stream,
+ "temperature": temperature,
+ "text": text,
+ "tools": tools,
},
- response_create_params.ResponseCreateParams,
+ response_create_params.ResponseCreateParamsStreaming
+ if stream
+ else response_create_params.ResponseCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ResponseCreateResponse,
+ cast_to=ResponseObject,
stream=stream or False,
stream_cls=Stream[ResponseObjectStream],
)
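The non-overloaded implementation above dispatches on `stream`: it selects `ResponseCreateParamsStreaming` or `ResponseCreateParamsNonStreaming` for the body transform and passes `Stream[ResponseObjectStream]` as the stream class. A hedged caller-side sketch of the streaming path (the event handling is illustrative):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient()

    # With stream=True the overloads resolve to Stream[ResponseObjectStream].
    stream = client.responses.create(
        input="string",
        model="model",
        stream=True,
    )
    for event in stream:
        print(type(event).__name__)  # inspect each streamed event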
@@ -318,28 +405,39 @@ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse
async def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse:
+ ) -> ResponseObject:
"""
- List all OpenAI responses.
+ Create a new OpenAI response.
Args:
- after: The ID of the last response to return.
+ input: Input message(s) to create the response.
- limit: The number of responses to return.
+ model: The underlying LLM used for completions.
- model: The model to filter responses by.
+ include: (Optional) Additional fields to include in the response.
- order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
extra_headers: Send extra headers
@@ -355,28 +453,39 @@ async def create(
async def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: Literal[True],
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse:
+ ) -> AsyncStream[ResponseObjectStream]:
"""
- List all OpenAI responses.
+ Create a new OpenAI response.
Args:
- after: The ID of the last response to return.
+ input: Input message(s) to create the response.
- limit: The number of responses to return.
+ model: The underlying LLM used for completions.
- model: The model to filter responses by.
+ include: (Optional) Additional fields to include in the response.
- order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
extra_headers: Send extra headers
@@ -388,35 +497,100 @@ async def create(
"""
...
+ @overload
async def create(
self,
*,
- after: str | Omit = omit,
- limit: int | Omit = omit,
- model: str | Omit = omit,
- order: Literal["asc", "desc"] | Omit = omit,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ stream: bool,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ResponseObject | AsyncStream[ResponseObjectStream]:
+ """
+ Create a new OpenAI response.
+
+ Args:
+ input: Input message(s) to create the response.
+
+ model: The underlying LLM used for completions.
+
+ include: (Optional) Additional fields to include in the response.
+
+ previous_response_id: (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+
+ text: Text response configuration for OpenAI responses.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["input", "model"], ["input", "model", "stream"])
+ async def create(
+ self,
+ *,
+ input: Union[str, Iterable[response_create_params.InputUnionMember1]],
+ model: str,
+ include: SequenceNotStr[str] | Omit = omit,
+ instructions: str | Omit = omit,
+ max_infer_iters: int | Omit = omit,
+ previous_response_id: str | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Literal[True] | Omit = omit,
+ temperature: float | Omit = omit,
+ text: response_create_params.Text | Omit = omit,
+ tools: Iterable[response_create_params.Tool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ResponseCreateResponse | AsyncStream[ResponseObjectStream]:
+ ) -> ResponseObject | AsyncStream[ResponseObjectStream]:
return await self._post(
"/v1/responses",
body=await async_maybe_transform(
{
- "after": after,
- "limit": limit,
+ "input": input,
"model": model,
- "order": order,
+ "include": include,
+ "instructions": instructions,
+ "max_infer_iters": max_infer_iters,
+ "previous_response_id": previous_response_id,
+ "store": store,
+ "stream": stream,
+ "temperature": temperature,
+ "text": text,
+ "tools": tools,
},
- response_create_params.ResponseCreateParams,
+ response_create_params.ResponseCreateParamsStreaming
+ if stream
+ else response_create_params.ResponseCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ResponseCreateResponse,
+ cast_to=ResponseObject,
stream=stream or False,
stream_cls=AsyncStream[ResponseObjectStream],
)
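The async resource mirrors the sync dispatch, substituting `AsyncStream[ResponseObjectStream]` as the stream class. A hedged sketch of consuming the async stream:

    import asyncio

    from llama_stack_client import AsyncLlamaStackClient


    async def main() -> None:
        client = AsyncLlamaStackClient()
        stream = await client.responses.create(
            input="string",
            model="model",
            stream=True,
        )
        async for event in stream:  # AsyncStream supports async iteration
            print(type(event).__name__)


    asyncio.run(main())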
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index eeed2336..2b89de40 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -82,7 +82,6 @@
from .list_benchmarks_response import ListBenchmarksResponse as ListBenchmarksResponse
from .list_vector_dbs_response import ListVectorDBsResponse as ListVectorDBsResponse
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
-from .response_create_response import ResponseCreateResponse as ResponseCreateResponse
from .response_delete_response import ResponseDeleteResponse as ResponseDeleteResponse
from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index c5021596..daf7f6cf 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -2,32 +2,450 @@
from __future__ import annotations
-from typing import Union
-from typing_extensions import Literal, TypedDict
+from typing import Dict, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["ResponseCreateParamsBase", "ResponseCreateParamsNonStreaming"]
+from .._types import SequenceNotStr
+
+__all__ = [
+ "ResponseCreateParamsBase",
+ "InputUnionMember1",
+ "InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall",
+ "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall",
+ "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult",
+ "InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall",
+ "InputUnionMember1OpenAIResponseInputFunctionToolCallOutput",
+ "InputUnionMember1OpenAIResponseMcpApprovalRequest",
+ "InputUnionMember1OpenAIResponseMcpApprovalResponse",
+ "InputUnionMember1OpenAIResponseMessage",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember1",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
+ "Text",
+ "TextFormat",
+ "Tool",
+ "ToolOpenAIResponseInputToolWebSearch",
+ "ToolOpenAIResponseInputToolFileSearch",
+ "ToolOpenAIResponseInputToolFileSearchRankingOptions",
+ "ToolOpenAIResponseInputToolFunction",
+ "ToolOpenAIResponseInputToolMcp",
+ "ToolOpenAIResponseInputToolMcpRequireApproval",
+ "ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter",
+ "ToolOpenAIResponseInputToolMcpAllowedTools",
+ "ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter",
+ "ResponseCreateParamsNonStreaming",
+ "ResponseCreateParamsStreaming",
+]
class ResponseCreateParamsBase(TypedDict, total=False):
- after: str
- """The ID of the last response to return."""
+ input: Required[Union[str, Iterable[InputUnionMember1]]]
+ """Input message(s) to create the response."""
- limit: int
- """The number of responses to return."""
+ model: Required[str]
+ """The underlying LLM used for completions."""
- model: str
- """The model to filter responses by."""
+ include: SequenceNotStr[str]
+ """(Optional) Additional fields to include in the response."""
- order: Literal["asc", "desc"]
- """The order to sort responses by when sorted by created_at ('asc' or 'desc')."""
+ instructions: str
+ max_infer_iters: int
-class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
- pass
+ previous_response_id: str
+ """
+ (Optional) If specified, the new response will be a continuation of the previous
+ response. This can be used to easily fork off new responses from existing
+ responses.
+ """
+
+ store: bool
+
+ temperature: float
+
+ text: Text
+ """Text response configuration for OpenAI responses."""
+
+ tools: Iterable[Tool]
+
+
+class InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False):
+ id: Required[str]
+ """Unique identifier for this tool call"""
+
+ status: Required[str]
+ """Current status of the web search operation"""
+
+ type: Required[Literal["web_search_call"]]
+ """Tool call type identifier, always "web_search_call" """
+
+
+class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False):
+ attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]]
+ """(Optional) Key-value attributes associated with the file"""
+
+ file_id: Required[str]
+ """Unique identifier of the file containing the result"""
+
+ filename: Required[str]
+ """Name of the file containing the result"""
+
+ score: Required[float]
+ """Relevance score for this search result (between 0 and 1)"""
+
+ text: Required[str]
+ """Text content of the search result"""
+
+
+class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False):
+ id: Required[str]
+ """Unique identifier for this tool call"""
+
+ queries: Required[SequenceNotStr[str]]
+ """List of search queries executed"""
+
+ status: Required[str]
+ """Current status of the file search operation"""
+
+ type: Required[Literal["file_search_call"]]
+ """Tool call type identifier, always "file_search_call" """
+
+ results: Iterable[InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult]
+ """(Optional) Search results returned by the file search operation"""
+
+
+class InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False):
+ arguments: Required[str]
+ """JSON string containing the function arguments"""
+
+ call_id: Required[str]
+ """Unique identifier for the function call"""
+
+ name: Required[str]
+ """Name of the function being called"""
+
+ type: Required[Literal["function_call"]]
+ """Tool call type identifier, always "function_call" """
+
+ id: str
+ """(Optional) Additional identifier for the tool call"""
+
+ status: str
+ """(Optional) Current status of the function call execution"""
+
+
+class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, total=False):
+ call_id: Required[str]
+
+ output: Required[str]
+
+ type: Required[Literal["function_call_output"]]
+
+ id: str
+
+ status: str
+
+
+class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False):
+ id: Required[str]
+
+ arguments: Required[str]
+
+ name: Required[str]
+
+ server_label: Required[str]
+
+ type: Required[Literal["mcp_approval_request"]]
+
+
+class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False):
+ approval_request_id: Required[str]
+
+ approve: Required[bool]
+
+ type: Required[Literal["mcp_approval_response"]]
+
+ id: str
+
+ reason: str
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(
+ TypedDict, total=False
+):
+ text: Required[str]
+ """The text content of the input message"""
+
+ type: Required[Literal["input_text"]]
+ """Content type identifier, always "input_text" """
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(
+ TypedDict, total=False
+):
+ detail: Required[Literal["low", "high", "auto"]]
+ """Level of detail for image processing, can be "low", "high", or "auto" """
+
+ type: Required[Literal["input_image"]]
+ """Content type identifier, always "input_image" """
+
+ image_url: str
+ """(Optional) URL of the image content"""
+
+
+InputUnionMember1OpenAIResponseMessageContentUnionMember1: TypeAlias = Union[
+ InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
+ InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
+]
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(
+ TypedDict, total=False
+):
+ file_id: Required[str]
+ """Unique identifier of the referenced file"""
+
+ filename: Required[str]
+ """Name of the referenced file"""
+
+ index: Required[int]
+ """Position index of the citation within the content"""
+
+ type: Required[Literal["file_citation"]]
+ """Annotation type identifier, always "file_citation" """
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(
+ TypedDict, total=False
+):
+ end_index: Required[int]
+ """End position of the citation span in the content"""
+
+ start_index: Required[int]
+ """Start position of the citation span in the content"""
+
+ title: Required[str]
+ """Title of the referenced web resource"""
+
+ type: Required[Literal["url_citation"]]
+ """Annotation type identifier, always "url_citation" """
+
+ url: Required[str]
+ """URL of the referenced web resource"""
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
+ TypedDict, total=False
+):
+ container_id: Required[str]
+
+ end_index: Required[int]
+
+ file_id: Required[str]
+
+ filename: Required[str]
+
+ start_index: Required[int]
+
+ type: Required[Literal["container_file_citation"]]
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(
+ TypedDict, total=False
+):
+ file_id: Required[str]
+
+ index: Required[int]
+
+ type: Required[Literal["file_path"]]
+
+
+InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Union[
+ InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
+ InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
+ InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
+ InputUnionMember1OpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
+]
+
+
+class InputUnionMember1OpenAIResponseMessageContentUnionMember2(TypedDict, total=False):
+ annotations: Required[Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember2Annotation]]
+
+ text: Required[str]
+
+ type: Required[Literal["output_text"]]
+
+
+class InputUnionMember1OpenAIResponseMessage(TypedDict, total=False):
+ content: Required[
+ Union[
+ str,
+ Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember1],
+ Iterable[InputUnionMember1OpenAIResponseMessageContentUnionMember2],
+ ]
+ ]
+
+ role: Required[Literal["system", "developer", "user", "assistant"]]
+
+ type: Required[Literal["message"]]
+
+ id: str
+
+ status: str
+
+
+InputUnionMember1: TypeAlias = Union[
+ InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall,
+ InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall,
+ InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall,
+ InputUnionMember1OpenAIResponseInputFunctionToolCallOutput,
+ InputUnionMember1OpenAIResponseMcpApprovalRequest,
+ InputUnionMember1OpenAIResponseMcpApprovalResponse,
+ InputUnionMember1OpenAIResponseMessage,
+]
+
+
+class TextFormat(TypedDict, total=False):
+ type: Required[Literal["text", "json_schema", "json_object"]]
+ """Must be "text", "json_schema", or "json_object" to identify the format type"""
+
+ description: str
+ """(Optional) A description of the response format. Only used for json_schema."""
+
+ name: str
+ """The name of the response format. Only used for json_schema."""
+
+ schema: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """The JSON schema the response should conform to.
+
+ In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
+ """
+
+ strict: bool
+ """(Optional) Whether to strictly enforce the JSON schema.
+
+ If true, the response must match the schema exactly. Only used for json_schema.
+ """
+
+
+class Text(TypedDict, total=False):
+ format: TextFormat
+ """(Optional) Text format configuration specifying output format requirements"""
+
+
+class ToolOpenAIResponseInputToolWebSearch(TypedDict, total=False):
+ type: Required[Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"]]
+ """Web search tool type variant to use"""
+
+ search_context_size: str
+ """(Optional) Size of search context, must be "low", "medium", or "high" """
+
+
+class ToolOpenAIResponseInputToolFileSearchRankingOptions(TypedDict, total=False):
+ ranker: str
+ """(Optional) Name of the ranking algorithm to use"""
+
+ score_threshold: float
+ """(Optional) Minimum relevance score threshold for results"""
+
+
+class ToolOpenAIResponseInputToolFileSearch(TypedDict, total=False):
+ type: Required[Literal["file_search"]]
+ """Tool type identifier, always "file_search" """
+
+ vector_store_ids: Required[SequenceNotStr[str]]
+ """List of vector store identifiers to search within"""
+
+ filters: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) Additional filters to apply to the search"""
+
+ max_num_results: int
+ """(Optional) Maximum number of search results to return (1-50)"""
+
+ ranking_options: ToolOpenAIResponseInputToolFileSearchRankingOptions
+ """(Optional) Options for ranking and scoring search results"""
+
+
+class ToolOpenAIResponseInputToolFunction(TypedDict, total=False):
+ name: Required[str]
+ """Name of the function that can be called"""
+
+ type: Required[Literal["function"]]
+ """Tool type identifier, always "function" """
+
+ description: str
+ """(Optional) Description of what the function does"""
+
+ parameters: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) JSON schema defining the function's parameters"""
+
+ strict: bool
+ """(Optional) Whether to enforce strict parameter validation"""
+
+
+class ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter(TypedDict, total=False):
+ always: SequenceNotStr[str]
+ """(Optional) List of tool names that always require approval"""
+
+ never: SequenceNotStr[str]
+ """(Optional) List of tool names that never require approval"""
+
+
+ToolOpenAIResponseInputToolMcpRequireApproval: TypeAlias = Union[
+ Literal["always", "never"], ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter
+]
+
+
+class ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter(TypedDict, total=False):
+ tool_names: SequenceNotStr[str]
+ """(Optional) List of specific tool names that are allowed"""
+
+
+ToolOpenAIResponseInputToolMcpAllowedTools: TypeAlias = Union[
+ SequenceNotStr[str], ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter
+]
+
+
+class ToolOpenAIResponseInputToolMcp(TypedDict, total=False):
+ require_approval: Required[ToolOpenAIResponseInputToolMcpRequireApproval]
+ """Approval requirement for tool calls ("always", "never", or filter)"""
+
+ server_label: Required[str]
+ """Label to identify this MCP server"""
+
+ server_url: Required[str]
+ """URL endpoint of the MCP server"""
+
+ type: Required[Literal["mcp"]]
+ """Tool type identifier, always "mcp" """
+
+ allowed_tools: ToolOpenAIResponseInputToolMcpAllowedTools
+ """(Optional) Restriction on which tools can be used from this server"""
+
+ headers: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) HTTP headers to include when connecting to the server"""
+
+
+Tool: TypeAlias = Union[
+ ToolOpenAIResponseInputToolWebSearch,
+ ToolOpenAIResponseInputToolFileSearch,
+ ToolOpenAIResponseInputToolFunction,
+ ToolOpenAIResponseInputToolMcp,
+]
class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
- pass
+ stream: Literal[False]
+
+
+class ResponseCreateParamsStreaming(ResponseCreateParamsBase):
+ stream: Required[Literal[True]]
-ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsNonStreaming]
+ResponseCreateParams = Union[ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming]
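Because these params are TypedDicts, request bodies are plain dictionaries at runtime. A minimal sketch combining fields defined above — a function tool plus a json_schema text format — where the function name, schema, and prompt are illustrative and not taken from the patch:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient()

    # Keys mirror the TypedDict fields above; values are placeholders.
    response = client.responses.create(
        input="What is the weather in Paris?",  # illustrative prompt
        model="model",
        text={"format": {"type": "json_schema", "name": "weather", "schema": {"foo": True}}},
        tools=[
            {
                "type": "function",
                "name": "get_weather",  # hypothetical function name
                "description": "Look up current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                },
                "strict": True,
            }
        ],
    )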
diff --git a/src/llama_stack_client/types/response_create_response.py b/src/llama_stack_client/types/response_create_response.py
deleted file mode 100644
index bc031fcc..00000000
--- a/src/llama_stack_client/types/response_create_response.py
+++ /dev/null
@@ -1,640 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from pydantic import Field as FieldInfo
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = [
- "ResponseCreateResponse",
- "Data",
- "DataInput",
- "DataInputOpenAIResponseOutputMessageWebSearchToolCall",
- "DataInputOpenAIResponseOutputMessageFileSearchToolCall",
- "DataInputOpenAIResponseOutputMessageFileSearchToolCallResult",
- "DataInputOpenAIResponseOutputMessageFunctionToolCall",
- "DataInputOpenAIResponseInputFunctionToolCallOutput",
- "DataInputOpenAIResponseMcpApprovalRequest",
- "DataInputOpenAIResponseMcpApprovalResponse",
- "DataInputOpenAIResponseMessage",
- "DataInputOpenAIResponseMessageContentUnionMember1",
- "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
- "DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
- "DataInputOpenAIResponseMessageContentUnionMember2",
- "DataInputOpenAIResponseMessageContentUnionMember2Annotation",
- "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
- "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
- "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
- "DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
- "DataOutput",
- "DataOutputOpenAIResponseMessage",
- "DataOutputOpenAIResponseMessageContentUnionMember1",
- "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText",
- "DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage",
- "DataOutputOpenAIResponseMessageContentUnionMember2",
- "DataOutputOpenAIResponseMessageContentUnionMember2Annotation",
- "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation",
- "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation",
- "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation",
- "DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath",
- "DataOutputOpenAIResponseOutputMessageWebSearchToolCall",
- "DataOutputOpenAIResponseOutputMessageFileSearchToolCall",
- "DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult",
- "DataOutputOpenAIResponseOutputMessageFunctionToolCall",
- "DataOutputOpenAIResponseOutputMessageMcpCall",
- "DataOutputOpenAIResponseOutputMessageMcpListTools",
- "DataOutputOpenAIResponseOutputMessageMcpListToolsTool",
- "DataOutputOpenAIResponseMcpApprovalRequest",
- "DataText",
- "DataTextFormat",
- "DataError",
-]
-
-
-class DataInputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
- id: str
- """Unique identifier for this tool call"""
-
- status: str
- """Current status of the web search operation"""
-
- type: Literal["web_search_call"]
- """Tool call type identifier, always "web_search_call" """
-
-
-class DataInputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
- attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
- """(Optional) Key-value attributes associated with the file"""
-
- file_id: str
- """Unique identifier of the file containing the result"""
-
- filename: str
- """Name of the file containing the result"""
-
- score: float
- """Relevance score for this search result (between 0 and 1)"""
-
- text: str
- """Text content of the search result"""
-
-
-class DataInputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
- id: str
- """Unique identifier for this tool call"""
-
- queries: List[str]
- """List of search queries executed"""
-
- status: str
- """Current status of the file search operation"""
-
- type: Literal["file_search_call"]
- """Tool call type identifier, always "file_search_call" """
-
- results: Optional[List[DataInputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
- """(Optional) Search results returned by the file search operation"""
-
-
-class DataInputOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
- arguments: str
- """JSON string containing the function arguments"""
-
- call_id: str
- """Unique identifier for the function call"""
-
- name: str
- """Name of the function being called"""
-
- type: Literal["function_call"]
- """Tool call type identifier, always "function_call" """
-
- id: Optional[str] = None
- """(Optional) Additional identifier for the tool call"""
-
- status: Optional[str] = None
- """(Optional) Current status of the function call execution"""
-
-
-class DataInputOpenAIResponseInputFunctionToolCallOutput(BaseModel):
- call_id: str
-
- output: str
-
- type: Literal["function_call_output"]
-
- id: Optional[str] = None
-
- status: Optional[str] = None
-
-
-class DataInputOpenAIResponseMcpApprovalRequest(BaseModel):
- id: str
-
- arguments: str
-
- name: str
-
- server_label: str
-
- type: Literal["mcp_approval_request"]
-
-
-class DataInputOpenAIResponseMcpApprovalResponse(BaseModel):
- approval_request_id: str
-
- approve: bool
-
- type: Literal["mcp_approval_response"]
-
- id: Optional[str] = None
-
- reason: Optional[str] = None
-
-
-class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
- text: str
- """The text content of the input message"""
-
- type: Literal["input_text"]
- """Content type identifier, always "input_text" """
-
-
-class DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
- detail: Literal["low", "high", "auto"]
- """Level of detail for image processing, can be "low", "high", or "auto" """
-
- type: Literal["input_image"]
- """Content type identifier, always "input_image" """
-
- image_url: Optional[str] = None
- """(Optional) URL of the image content"""
-
-
-DataInputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
- Union[
- DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
- DataInputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
- file_id: str
- """Unique identifier of the referenced file"""
-
- filename: str
- """Name of the referenced file"""
-
- index: int
- """Position index of the citation within the content"""
-
- type: Literal["file_citation"]
- """Annotation type identifier, always "file_citation" """
-
-
-class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
- end_index: int
- """End position of the citation span in the content"""
-
- start_index: int
- """Start position of the citation span in the content"""
-
- title: str
- """Title of the referenced web resource"""
-
- type: Literal["url_citation"]
- """Annotation type identifier, always "url_citation" """
-
- url: str
- """URL of the referenced web resource"""
-
-
-class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
- BaseModel
-):
- container_id: str
-
- end_index: int
-
- file_id: str
-
- filename: str
-
- start_index: int
-
- type: Literal["container_file_citation"]
-
-
-class DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
- file_id: str
-
- index: int
-
- type: Literal["file_path"]
-
-
-DataInputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
- Union[
- DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
- DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
- DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
- DataInputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DataInputOpenAIResponseMessageContentUnionMember2(BaseModel):
- annotations: List[DataInputOpenAIResponseMessageContentUnionMember2Annotation]
-
- text: str
-
- type: Literal["output_text"]
-
-
-class DataInputOpenAIResponseMessage(BaseModel):
- content: Union[
- str,
- List[DataInputOpenAIResponseMessageContentUnionMember1],
- List[DataInputOpenAIResponseMessageContentUnionMember2],
- ]
-
- role: Literal["system", "developer", "user", "assistant"]
-
- type: Literal["message"]
-
- id: Optional[str] = None
-
- status: Optional[str] = None
-
-
-DataInput: TypeAlias = Union[
- DataInputOpenAIResponseOutputMessageWebSearchToolCall,
- DataInputOpenAIResponseOutputMessageFileSearchToolCall,
- DataInputOpenAIResponseOutputMessageFunctionToolCall,
- DataInputOpenAIResponseInputFunctionToolCallOutput,
- DataInputOpenAIResponseMcpApprovalRequest,
- DataInputOpenAIResponseMcpApprovalResponse,
- DataInputOpenAIResponseMessage,
-]
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel):
- text: str
- """The text content of the input message"""
-
- type: Literal["input_text"]
- """Content type identifier, always "input_text" """
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage(BaseModel):
- detail: Literal["low", "high", "auto"]
- """Level of detail for image processing, can be "low", "high", or "auto" """
-
- type: Literal["input_image"]
- """Content type identifier, always "input_image" """
-
- image_url: Optional[str] = None
- """(Optional) URL of the image content"""
-
-
-DataOutputOpenAIResponseMessageContentUnionMember1: TypeAlias = Annotated[
- Union[
- DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText,
- DataOutputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentImage,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation(BaseModel):
- file_id: str
- """Unique identifier of the referenced file"""
-
- filename: str
- """Name of the referenced file"""
-
- index: int
- """Position index of the citation within the content"""
-
- type: Literal["file_citation"]
- """Annotation type identifier, always "file_citation" """
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation(BaseModel):
- end_index: int
- """End position of the citation span in the content"""
-
- start_index: int
- """Start position of the citation span in the content"""
-
- title: str
- """Title of the referenced web resource"""
-
- type: Literal["url_citation"]
- """Annotation type identifier, always "url_citation" """
-
- url: str
- """URL of the referenced web resource"""
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation(
- BaseModel
-):
- container_id: str
-
- end_index: int
-
- file_id: str
-
- filename: str
-
- start_index: int
-
- type: Literal["container_file_citation"]
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath(BaseModel):
- file_id: str
-
- index: int
-
- type: Literal["file_path"]
-
-
-DataOutputOpenAIResponseMessageContentUnionMember2Annotation: TypeAlias = Annotated[
- Union[
- DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFileCitation,
- DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationCitation,
- DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationContainerFileCitation,
- DataOutputOpenAIResponseMessageContentUnionMember2AnnotationOpenAIResponseAnnotationFilePath,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DataOutputOpenAIResponseMessageContentUnionMember2(BaseModel):
- annotations: List[DataOutputOpenAIResponseMessageContentUnionMember2Annotation]
-
- text: str
-
- type: Literal["output_text"]
-
-
-class DataOutputOpenAIResponseMessage(BaseModel):
- content: Union[
- str,
- List[DataOutputOpenAIResponseMessageContentUnionMember1],
- List[DataOutputOpenAIResponseMessageContentUnionMember2],
- ]
-
- role: Literal["system", "developer", "user", "assistant"]
-
- type: Literal["message"]
-
- id: Optional[str] = None
-
- status: Optional[str] = None
-
-
-class DataOutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
- id: str
- """Unique identifier for this tool call"""
-
- status: str
- """Current status of the web search operation"""
-
- type: Literal["web_search_call"]
- """Tool call type identifier, always "web_search_call" """
-
-
-class DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
- attributes: Dict[str, Union[bool, float, str, List[object], object, None]]
- """(Optional) Key-value attributes associated with the file"""
-
- file_id: str
- """Unique identifier of the file containing the result"""
-
- filename: str
- """Name of the file containing the result"""
-
- score: float
- """Relevance score for this search result (between 0 and 1)"""
-
- text: str
- """Text content of the search result"""
-
-
-class DataOutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
- id: str
- """Unique identifier for this tool call"""
-
- queries: List[str]
- """List of search queries executed"""
-
- status: str
- """Current status of the file search operation"""
-
- type: Literal["file_search_call"]
- """Tool call type identifier, always "file_search_call" """
-
- results: Optional[List[DataOutputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
- """(Optional) Search results returned by the file search operation"""
-
-
-class DataOutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
- arguments: str
- """JSON string containing the function arguments"""
-
- call_id: str
- """Unique identifier for the function call"""
-
- name: str
- """Name of the function being called"""
-
- type: Literal["function_call"]
- """Tool call type identifier, always "function_call" """
-
- id: Optional[str] = None
- """(Optional) Additional identifier for the tool call"""
-
- status: Optional[str] = None
- """(Optional) Current status of the function call execution"""
-
-
-class DataOutputOpenAIResponseOutputMessageMcpCall(BaseModel):
- id: str
- """Unique identifier for this MCP call"""
-
- arguments: str
- """JSON string containing the MCP call arguments"""
-
- name: str
- """Name of the MCP method being called"""
-
- server_label: str
- """Label identifying the MCP server handling the call"""
-
- type: Literal["mcp_call"]
- """Tool call type identifier, always "mcp_call" """
-
- error: Optional[str] = None
- """(Optional) Error message if the MCP call failed"""
-
- output: Optional[str] = None
- """(Optional) Output result from the successful MCP call"""
-
-
-class DataOutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
- input_schema: Dict[str, Union[bool, float, str, List[object], object, None]]
- """JSON schema defining the tool's input parameters"""
-
- name: str
- """Name of the tool"""
-
- description: Optional[str] = None
- """(Optional) Description of what the tool does"""
-
-
-class DataOutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
- id: str
- """Unique identifier for this MCP list tools operation"""
-
- server_label: str
- """Label identifying the MCP server providing the tools"""
-
- tools: List[DataOutputOpenAIResponseOutputMessageMcpListToolsTool]
- """List of available tools provided by the MCP server"""
-
- type: Literal["mcp_list_tools"]
- """Tool call type identifier, always "mcp_list_tools" """
-
-
-class DataOutputOpenAIResponseMcpApprovalRequest(BaseModel):
- id: str
-
- arguments: str
-
- name: str
-
- server_label: str
-
- type: Literal["mcp_approval_request"]
-
-
-DataOutput: TypeAlias = Annotated[
- Union[
- DataOutputOpenAIResponseMessage,
- DataOutputOpenAIResponseOutputMessageWebSearchToolCall,
- DataOutputOpenAIResponseOutputMessageFileSearchToolCall,
- DataOutputOpenAIResponseOutputMessageFunctionToolCall,
- DataOutputOpenAIResponseOutputMessageMcpCall,
- DataOutputOpenAIResponseOutputMessageMcpListTools,
- DataOutputOpenAIResponseMcpApprovalRequest,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DataTextFormat(BaseModel):
- type: Literal["text", "json_schema", "json_object"]
- """Must be "text", "json_schema", or "json_object" to identify the format type"""
-
- description: Optional[str] = None
- """(Optional) A description of the response format. Only used for json_schema."""
-
- name: Optional[str] = None
- """The name of the response format. Only used for json_schema."""
-
- schema_: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = FieldInfo(
- alias="schema", default=None
- )
- """The JSON schema the response should conform to.
-
- In a Python SDK, this is often a `pydantic` model. Only used for json_schema.
- """
-
- strict: Optional[bool] = None
- """(Optional) Whether to strictly enforce the JSON schema.
-
- If true, the response must match the schema exactly. Only used for json_schema.
- """
-
-
-class DataText(BaseModel):
- format: Optional[DataTextFormat] = None
- """(Optional) Text format configuration specifying output format requirements"""
-
-
-class DataError(BaseModel):
- code: str
- """Error code identifying the type of failure"""
-
- message: str
- """Human-readable error message describing the failure"""
-
-
-class Data(BaseModel):
- id: str
- """Unique identifier for this response"""
-
- created_at: int
- """Unix timestamp when the response was created"""
-
- input: List[DataInput]
- """List of input items that led to this response"""
-
- model: str
- """Model identifier used for generation"""
-
- object: Literal["response"]
- """Object type identifier, always "response" """
-
- output: List[DataOutput]
- """List of generated output items (messages, tool calls, etc.)"""
-
- parallel_tool_calls: bool
- """Whether tool calls can be executed in parallel"""
-
- status: str
- """Current status of the response generation"""
-
- text: DataText
- """Text formatting configuration for the response"""
-
- error: Optional[DataError] = None
- """(Optional) Error details if the response generation failed"""
-
- previous_response_id: Optional[str] = None
- """(Optional) ID of the previous response in a conversation"""
-
- temperature: Optional[float] = None
- """(Optional) Sampling temperature used for generation"""
-
- top_p: Optional[float] = None
- """(Optional) Nucleus sampling parameter used for generation"""
-
- truncation: Optional[str] = None
- """(Optional) Truncation strategy applied to the response"""
-
-
-class ResponseCreateResponse(BaseModel):
- data: List[Data]
- """List of response objects with their input context"""
-
- first_id: str
- """Identifier of the first item in this page"""
-
- has_more: bool
- """Whether there are more results available beyond this page"""
-
- last_id: str
- """Identifier of the last item in this page"""
-
- object: Literal["list"]
- """Object type identifier, always "list" """
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index ebea8b70..ad2ab3be 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -12,7 +12,6 @@
from llama_stack_client.types import (
ResponseObject,
ResponseListResponse,
- ResponseCreateResponse,
ResponseDeleteResponse,
)
from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
@@ -25,73 +24,133 @@ class TestResponses:
@parametrize
def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
- response = client.responses.create()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ response = client.responses.create(
+ input="string",
+ model="model",
+ )
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
response = client.responses.create(
- after="after",
- limit=0,
+ input="string",
model="model",
- order="asc",
+ include=["string"],
+ instructions="instructions",
+ max_infer_iters=0,
+ previous_response_id="previous_response_id",
+ store=True,
+ stream=False,
+ temperature=0,
+ text={
+ "format": {
+ "type": "text",
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": True},
+ "strict": True,
+ }
+ },
+ tools=[
+ {
+ "type": "web_search",
+ "search_context_size": "search_context_size",
+ }
+ ],
)
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
- http_response = client.responses.with_raw_response.create()
+ http_response = client.responses.with_raw_response.create(
+ input="string",
+ model="model",
+ )
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None:
- with client.responses.with_streaming_response.create() as http_response:
+ with client.responses.with_streaming_response.create(
+ input="string",
+ model="model",
+ ) as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
@parametrize
def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
- response = client.responses.create()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ response_stream = client.responses.create(
+ input="string",
+ model="model",
+ stream=True,
+ )
+ response_stream.response.close()
@parametrize
def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None:
- response = client.responses.create(
- after="after",
- limit=0,
+ response_stream = client.responses.create(
+ input="string",
model="model",
- order="asc",
+ stream=True,
+ include=["string"],
+ instructions="instructions",
+ max_infer_iters=0,
+ previous_response_id="previous_response_id",
+ store=True,
+ temperature=0,
+ text={
+ "format": {
+ "type": "text",
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": True},
+ "strict": True,
+ }
+ },
+ tools=[
+ {
+ "type": "web_search",
+ "search_context_size": "search_context_size",
+ }
+ ],
)
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ response_stream.response.close()
@parametrize
def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
- http_response = client.responses.with_raw_response.create()
+ response = client.responses.with_raw_response.create(
+ input="string",
+ model="model",
+ stream=True,
+ )
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
@parametrize
def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None:
- with client.responses.with_streaming_response.create() as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with client.responses.with_streaming_response.create(
+ input="string",
+ model="model",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ stream = response.parse()
+ stream.close()
- assert cast(Any, http_response.is_closed) is True
+ assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: LlamaStackClient) -> None:
@@ -212,73 +271,133 @@ class TestAsyncResponses:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.responses.create()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ response = await async_client.responses.create(
+ input="string",
+ model="model",
+ )
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
response = await async_client.responses.create(
- after="after",
- limit=0,
+ input="string",
model="model",
- order="asc",
+ include=["string"],
+ instructions="instructions",
+ max_infer_iters=0,
+ previous_response_id="previous_response_id",
+ store=True,
+ stream=False,
+ temperature=0,
+ text={
+ "format": {
+ "type": "text",
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": True},
+ "strict": True,
+ }
+ },
+ tools=[
+ {
+ "type": "web_search",
+ "search_context_size": "search_context_size",
+ }
+ ],
)
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- http_response = await async_client.responses.with_raw_response.create()
+ http_response = await async_client.responses.with_raw_response.create(
+ input="string",
+ model="model",
+ )
assert http_response.is_closed is True
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.responses.with_streaming_response.create() as http_response:
+ async with async_client.responses.with_streaming_response.create(
+ input="string",
+ model="model",
+ ) as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
response = await http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert_matches_type(ResponseObject, response, path=["response"])
assert cast(Any, http_response.is_closed) is True
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.responses.create()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ response_stream = await async_client.responses.create(
+ input="string",
+ model="model",
+ stream=True,
+ )
+ await response_stream.response.aclose()
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.responses.create(
- after="after",
- limit=0,
+ response_stream = await async_client.responses.create(
+ input="string",
model="model",
- order="asc",
+ stream=True,
+ include=["string"],
+ instructions="instructions",
+ max_infer_iters=0,
+ previous_response_id="previous_response_id",
+ store=True,
+ temperature=0,
+ text={
+ "format": {
+ "type": "text",
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": True},
+ "strict": True,
+ }
+ },
+ tools=[
+ {
+ "type": "web_search",
+ "search_context_size": "search_context_size",
+ }
+ ],
)
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ await response_stream.response.aclose()
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- http_response = await async_client.responses.with_raw_response.create()
+ response = await async_client.responses.with_raw_response.create(
+ input="string",
+ model="model",
+ stream=True,
+ )
- assert http_response.is_closed is True
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.responses.with_streaming_response.create() as http_response:
- assert not http_response.is_closed
- assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ async with async_client.responses.with_streaming_response.create(
+ input="string",
+ model="model",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- response = await http_response.parse()
- assert_matches_type(ResponseCreateResponse, response, path=["response"])
+ stream = await response.parse()
+ await stream.close()
- assert cast(Any, http_response.is_closed) is True
+ assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
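The overload-2 tests above cover the streaming path: with stream=True, create() returns an event stream instead of a ResponseObject, and the tests only open and close it. A hedged sketch of consuming such a stream (iterating events is an assumption here, not something these tests assert):

    # Sketch only; the event structure is an assumption.
    stream = client.responses.create(
        input="string",
        model="model",
        stream=True,
    )
    for event in stream:  # assumed iterable of typed stream events
        print(type(event).__name__)
    stream.response.close()  # the same cleanup the tests perform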
From 8542d1d2aaff782091ac5dc8c8dac59a0d1a5fa6 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 2 Oct 2025 13:39:25 -0700
Subject: [PATCH 7/9] fix(manual): use tool.name instead of tool.identifier
---
src/llama_stack_client/lib/agents/agent.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/llama_stack_client/lib/agents/agent.py b/src/llama_stack_client/lib/agents/agent.py
index 779c44c2..5e00a88b 100644
--- a/src/llama_stack_client/lib/agents/agent.py
+++ b/src/llama_stack_client/lib/agents/agent.py
@@ -212,7 +212,7 @@ def initialize(self) -> None:
for tg in self.agent_config["toolgroups"]:
toolgroup_id = tg if isinstance(tg, str) else tg.get("name")
for tool in self.client.tools.list(toolgroup_id=toolgroup_id, extra_headers=self.extra_headers):
- self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
+ self.builtin_tools[tool.name] = tg.get("args", {}) if isinstance(tg, dict) else {}
def create_session(self, session_name: str) -> str:
agentic_system_create_session_response = self.client.alpha.agents.session.create(
@@ -475,7 +475,7 @@ async def initialize(self) -> None:
self._agent_id = agentic_system_create_response.agent_id
for tg in self.agent_config["toolgroups"]:
for tool in await self.client.tools.list(toolgroup_id=tg, extra_headers=self.extra_headers):
- self.builtin_tools[tool.identifier] = tg.get("args", {}) if isinstance(tg, dict) else {}
+ self.builtin_tools[tool.name] = tg.get("args", {}) if isinstance(tg, dict) else {}
async def create_session(self, session_name: str) -> str:
await self.initialize()
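Both the sync and async loops above now key builtin_tools by tool.name, consistent with the tool API changes elsewhere in this series. A hedged sketch of the resulting registry construction, assuming a dict-form toolgroup entry (the group name and args are illustrative):

    builtin_tools: dict = {}
    toolgroups = [{"name": "builtin::websearch", "args": {"max_results": 3}}]
    for tg in toolgroups:
        toolgroup_id = tg if isinstance(tg, str) else tg.get("name")
        for tool in client.tools.list(toolgroup_id=toolgroup_id):
            # keyed by tool.name; tool.identifier is no longer the field to use
            builtin_tools[tool.name] = tg.get("args", {}) if isinstance(tg, dict) else {}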
From a05eb6194fd7234420e6237cd34b84d2c859f525 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 2 Oct 2025 14:38:12 -0700
Subject: [PATCH 8/9] fix(manual): kill arguments_json
---
src/llama_stack_client/lib/agents/client_tool.py | 7 +------
src/llama_stack_client/lib/agents/react/tool_parser.py | 3 +--
2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/src/llama_stack_client/lib/agents/client_tool.py b/src/llama_stack_client/lib/agents/client_tool.py
index f332aa13..09164361 100644
--- a/src/llama_stack_client/lib/agents/client_tool.py
+++ b/src/llama_stack_client/lib/agents/client_tool.py
@@ -81,12 +81,7 @@ def run(
metadata = {}
try:
- if tool_call.arguments_json is not None:
- params = json.loads(tool_call.arguments_json)
- elif isinstance(tool_call.arguments, str):
- params = json.loads(tool_call.arguments)
- else:
- params = tool_call.arguments
+ params = json.loads(tool_call.arguments)
response = self.run_impl(**params)
if isinstance(response, dict) and "content" in response:
diff --git a/src/llama_stack_client/lib/agents/react/tool_parser.py b/src/llama_stack_client/lib/agents/react/tool_parser.py
index 76b787dd..a796abac 100644
--- a/src/llama_stack_client/lib/agents/react/tool_parser.py
+++ b/src/llama_stack_client/lib/agents/react/tool_parser.py
@@ -55,8 +55,7 @@ def get_tool_calls(self, output_message: CompletionMessage) -> List[ToolCall]:
ToolCall(
call_id=call_id,
tool_name=tool_name,
- arguments=params,
- arguments_json=json.dumps(params),
+ arguments=json.dumps(params),
)
]
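With arguments_json removed, ToolCall.arguments is always a JSON-encoded string: the ReAct parser above serializes with json.dumps, and client_tool.run() parses with json.loads. A minimal round-trip sketch (values are illustrative; the ToolCall model itself is omitted to keep the example self-contained):

    import json

    params = {"query": "llama stack"}
    arguments = json.dumps(params)          # producer side (tool_parser.py)
    assert json.loads(arguments) == params  # consumer side (client_tool.py)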
From 7ecbe657b6112556c1829ac55dc0b8e16dd05568 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 21:38:39 +0000
Subject: [PATCH 9/9] release: 0.3.0-alpha.4
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 23 +++++++++++++++++++++++
pyproject.toml | 2 +-
3 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 51eda4ba..f3a4a10b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.3.0-alpha.3"
+ ".": "0.3.0-alpha.4"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 05e36acf..76851aac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,28 @@
# Changelog
+## 0.3.0-alpha.4 (2025-10-02)
+
+Full Changelog: [v0.3.0-alpha.3...v0.3.0-alpha.4](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.3...v0.3.0-alpha.4)
+
+### ⚠ BREAKING CHANGES
+
+* **api:** use input_schema instead of parameters for tools
+
+### Features
+
+* **api:** fixes to URLs ([406c366](https://github.com/llamastack/llama-stack-client-python/commit/406c36699f5618b0d2673ab38c93516aa403778f))
+* **api:** tool api (input_schema, etc.) changes ([064b98b](https://github.com/llamastack/llama-stack-client-python/commit/064b98bb38a87ee2c9deb93344409216a389aecd))
+* **api:** use input_schema instead of parameters for tools ([2d53df4](https://github.com/llamastack/llama-stack-client-python/commit/2d53df4f8b44af56019571e4b2db9ab875fb13d3))
+
+
+### Bug Fixes
+
+* **api:** another fix to capture correct responses.create() params ([a41fdb1](https://github.com/llamastack/llama-stack-client-python/commit/a41fdb1089f180f612e4fee2204217099c1dddd0))
+* **api:** fix the ToolDefParam updates ([4e24a76](https://github.com/llamastack/llama-stack-client-python/commit/4e24a76a0065b5ebea99a5792389ce9aa0fe7483))
+* **manual:** kill arguments_json ([a05eb61](https://github.com/llamastack/llama-stack-client-python/commit/a05eb6194fd7234420e6237cd34b84d2c859f525))
+* **manual:** update lib/ code for the input_schema changes ([67b3d02](https://github.com/llamastack/llama-stack-client-python/commit/67b3d02467b6cb12d606d0626bbc2b52ae767885))
+* **manual:** use tool.name instead of tool.identifier ([8542d1d](https://github.com/llamastack/llama-stack-client-python/commit/8542d1d2aaff782091ac5dc8c8dac59a0d1a5fa6))
+
## 0.3.0-alpha.3 (2025-09-30)
Full Changelog: [v0.3.0-alpha.2...v0.3.0-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.2...v0.3.0-alpha.3)
diff --git a/pyproject.toml b/pyproject.toml
index e0d567b3..99c36889 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.3.0-alpha.3"
+version = "0.3.0-alpha.4"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"