From 119bdb2a862fe772ca82770937aba49ffb039bf2 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 21 Aug 2025 19:13:04 +0000
Subject: [PATCH 1/5] feat(api): manual updates
---
.stats.yml | 2 +-
api.md | 6 +--
src/llama_stack_client/pagination.py | 18 +++++---
src/llama_stack_client/resources/inference.py | 26 +++++------
src/llama_stack_client/types/__init__.py | 5 +--
.../chat_completion_response_stream_chunk.py | 4 +-
.../types/completion_response.py | 34 --------------
.../types/shared/__init__.py | 1 +
.../types/shared/agent_config.py | 43 ++++++++++++++++--
.../types/shared/batch_completion.py | 34 ++++++++++++--
.../types/shared/chat_completion_response.py | 4 +-
.../shared_token_log_probs.py} | 6 +--
.../types/shared_params/agent_config.py | 43 ++++++++++++++++--
src/llama_stack_client/types/tool_def.py | 38 ----------------
.../types/tool_def_param.py | 39 ----------------
.../types/tool_runtime_list_tools_response.py | 44 +++++++++++++++++--
tests/api_resources/test_inference.py | 18 ++++----
17 files changed, 197 insertions(+), 168 deletions(-)
delete mode 100644 src/llama_stack_client/types/completion_response.py
rename src/llama_stack_client/types/{token_log_probs.py => shared/shared_token_log_probs.py} (67%)
delete mode 100644 src/llama_stack_client/types/tool_def.py
delete mode 100644 src/llama_stack_client/types/tool_def_param.py
diff --git a/.stats.yml b/.stats.yml
index cd6eb84a..b90091ba 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 106
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4f6633567c1a079df49d0cf58f37251a4bb0ee2f2a496ac83c9fee26eb325f9c.yml
openapi_spec_hash: af5b3d3bbecf48f15c90b982ccac852e
-config_hash: e67fd054e95c1e82f78f4b834e96bb65
+config_hash: ddcbd66d7ac80290da208232a746e30f
diff --git a/api.md b/api.md
index 4056f4a6..4f4c9180 100644
--- a/api.md
+++ b/api.md
@@ -20,6 +20,7 @@ from llama_stack_client.types import (
SafetyViolation,
SamplingParams,
ScoringResult,
+ SharedTokenLogProbs,
SystemMessage,
ToolCall,
ToolCallOrString,
@@ -62,7 +63,7 @@ Methods:
Types:
```python
-from llama_stack_client.types import ToolDef, ToolInvocationResult, ToolRuntimeListToolsResponse
+from llama_stack_client.types import ToolInvocationResult, ToolRuntimeListToolsResponse
```
Methods:
@@ -239,7 +240,6 @@ Types:
```python
from llama_stack_client.types import (
ChatCompletionResponseStreamChunk,
- CompletionResponse,
EmbeddingsResponse,
TokenLogProbs,
InferenceBatchChatCompletionResponse,
@@ -251,7 +251,7 @@ Methods:
- client.inference.batch_chat_completion(\*\*params) -> InferenceBatchChatCompletionResponse
- client.inference.batch_completion(\*\*params) -> BatchCompletion
- client.inference.chat_completion(\*\*params) -> ChatCompletionResponse
-- client.inference.completion(\*\*params) -> CompletionResponse
+- client.inference.completion(\*\*params) -> UnnamedTypeWithNoPropertyInfoOrParent0
- client.inference.embeddings(\*\*params) -> EmbeddingsResponse
# Embeddings
diff --git a/src/llama_stack_client/pagination.py b/src/llama_stack_client/pagination.py
index 9122ff46..67106bc5 100644
--- a/src/llama_stack_client/pagination.py
+++ b/src/llama_stack_client/pagination.py
@@ -24,10 +24,13 @@ def _get_page_items(self) -> List[_T]:
@override
def next_page_info(self) -> Optional[PageInfo]:
next_index = self.next_index
- if not next_index:
- return None
+ if next_index is None:
+ return None # type: ignore[unreachable]
+
+ length = len(self._get_page_items())
+ current_count = next_index + length
- return PageInfo(params={"start_index": next_index})
+ return PageInfo(params={"start_index": current_count})
class AsyncDatasetsIterrows(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
@@ -44,10 +47,13 @@ def _get_page_items(self) -> List[_T]:
@override
def next_page_info(self) -> Optional[PageInfo]:
next_index = self.next_index
- if not next_index:
- return None
+ if next_index is None:
+ return None # type: ignore[unreachable]
+
+ length = len(self._get_page_items())
+ current_count = next_index + length
- return PageInfo(params={"start_index": next_index})
+ return PageInfo(params={"start_index": current_count})
class SyncOpenAICursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
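
The pagination change above boils down to two things: only a `None` index (rather than any falsy value, which previously also treated index 0 as the end) terminates iteration, and the next request's `start_index` is the current page's `next_index` plus the number of items actually returned. A minimal standalone sketch of that arithmetic, using made-up numbers rather than the real page classes:

```python
from typing import List, Optional


def next_start_index(next_index: Optional[int], page_items: List[object]) -> Optional[int]:
    # None (not merely falsy) now signals the final page, so index 0 keeps paginating
    if next_index is None:
        return None
    # advance past the rows already delivered on this page
    return next_index + len(page_items)


print(next_start_index(0, ["row"] * 100))  # -> 100 (old logic would have stopped here)
print(next_start_index(None, []))          # -> None, iteration ends
```
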
diff --git a/src/llama_stack_client/resources/inference.py b/src/llama_stack_client/resources/inference.py
index 7aec2dbd..a10919b5 100644
--- a/src/llama_stack_client/resources/inference.py
+++ b/src/llama_stack_client/resources/inference.py
@@ -27,10 +27,10 @@
)
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
-from ..types.completion_response import CompletionResponse
from ..types.embeddings_response import EmbeddingsResponse
from ..types.shared_params.message import Message
from ..types.shared.batch_completion import BatchCompletion
+from ..types.inference_completion_params import UnnamedTypeWithNoPropertyInfoOrParent0
from ..types.shared_params.response_format import ResponseFormat
from ..types.shared_params.sampling_params import SamplingParams
from ..types.shared.chat_completion_response import ChatCompletionResponse
@@ -467,7 +467,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0:
"""
Generate a completion for the given content using the specified model.
@@ -514,7 +514,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Stream[CompletionResponse]:
+ ) -> Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -561,7 +561,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | Stream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -608,7 +608,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | Stream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
if stream:
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
return self._post(
@@ -629,9 +629,9 @@ def completion(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=CompletionResponse,
+ cast_to=UnnamedTypeWithNoPropertyInfoOrParent0,
stream=stream or False,
- stream_cls=Stream[CompletionResponse],
+ stream_cls=Stream[UnnamedTypeWithNoPropertyInfoOrParent0],
)
@typing_extensions.deprecated("/v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.")
@@ -1122,7 +1122,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0:
"""
Generate a completion for the given content using the specified model.
@@ -1169,7 +1169,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncStream[CompletionResponse]:
+ ) -> AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -1216,7 +1216,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | AsyncStream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -1263,7 +1263,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | AsyncStream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
if stream:
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
return await self._post(
@@ -1284,9 +1284,9 @@ async def completion(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=CompletionResponse,
+ cast_to=UnnamedTypeWithNoPropertyInfoOrParent0,
stream=stream or False,
- stream_cls=AsyncStream[CompletionResponse],
+ stream_cls=AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0],
)
@typing_extensions.deprecated("/v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.")
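
For reference, a usage sketch of the renamed return type, assuming a locally running Llama Stack server and a placeholder model id; the `content` and `stop_reason` attributes are assumed to match the fields of the removed `CompletionResponse` model:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Non-streaming call: the return annotation is now UnnamedTypeWithNoPropertyInfoOrParent0
completion = client.inference.completion(
    content="The capital of France is",
    model_id="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model id
)
print(completion.content, completion.stop_reason)
```
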
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 887f1706..06561221 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -27,6 +27,7 @@
ToolCallOrString as ToolCallOrString,
CompletionMessage as CompletionMessage,
InterleavedContent as InterleavedContent,
+ SharedTokenLogProbs as SharedTokenLogProbs,
ToolParamDefinition as ToolParamDefinition,
ToolResponseMessage as ToolResponseMessage,
QueryGeneratorConfig as QueryGeneratorConfig,
@@ -34,7 +35,6 @@
InterleavedContentItem as InterleavedContentItem,
)
from .shield import Shield as Shield
-from .tool_def import ToolDef as ToolDef
from .benchmark import Benchmark as Benchmark
from .route_info import RouteInfo as RouteInfo
from .scoring_fn import ScoringFn as ScoringFn
@@ -46,10 +46,8 @@
from .provider_info import ProviderInfo as ProviderInfo
from .tool_response import ToolResponse as ToolResponse
from .inference_step import InferenceStep as InferenceStep
-from .tool_def_param import ToolDefParam as ToolDefParam
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
-from .token_log_probs import TokenLogProbs as TokenLogProbs
from .file_list_params import FileListParams as FileListParams
from .shield_call_step import ShieldCallStep as ShieldCallStep
from .span_with_status import SpanWithStatus as SpanWithStatus
@@ -62,7 +60,6 @@
from .tool_list_response import ToolListResponse as ToolListResponse
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_list_response import AgentListResponse as AgentListResponse
-from .completion_response import CompletionResponse as CompletionResponse
from .embeddings_response import EmbeddingsResponse as EmbeddingsResponse
from .list_files_response import ListFilesResponse as ListFilesResponse
from .list_tools_response import ListToolsResponse as ListToolsResponse
diff --git a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
index 2b94eb18..3d4ac9a5 100644
--- a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
@@ -4,8 +4,8 @@
from typing_extensions import Literal
from .._models import BaseModel
-from .token_log_probs import TokenLogProbs
from .shared.content_delta import ContentDelta
+from .shared.shared_token_log_probs import SharedTokenLogProbs
__all__ = ["ChatCompletionResponseStreamChunk", "Event", "Metric"]
@@ -20,7 +20,7 @@ class Event(BaseModel):
event_type: Literal["start", "complete", "progress"]
"""Type of the event"""
- logprobs: Optional[List[TokenLogProbs]] = None
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
"""Optional log probabilities for generated tokens"""
stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None
diff --git a/src/llama_stack_client/types/completion_response.py b/src/llama_stack_client/types/completion_response.py
deleted file mode 100644
index 51772801..00000000
--- a/src/llama_stack_client/types/completion_response.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .token_log_probs import TokenLogProbs
-
-__all__ = ["CompletionResponse", "Metric"]
-
-
-class Metric(BaseModel):
- metric: str
- """The name of the metric"""
-
- value: float
- """The numeric value of the metric"""
-
- unit: Optional[str] = None
- """(Optional) The unit of measurement for the metric value"""
-
-
-class CompletionResponse(BaseModel):
- content: str
- """The generated completion text"""
-
- stop_reason: Literal["end_of_turn", "end_of_message", "out_of_tokens"]
- """Reason why generation stopped"""
-
- logprobs: Optional[List[TokenLogProbs]] = None
- """Optional log probabilities for generated tokens"""
-
- metrics: Optional[List[Metric]] = None
- """(Optional) List of metrics associated with the API response"""
diff --git a/src/llama_stack_client/types/shared/__init__.py b/src/llama_stack_client/types/shared/__init__.py
index 0fe46810..3a8b0c83 100644
--- a/src/llama_stack_client/types/shared/__init__.py
+++ b/src/llama_stack_client/types/shared/__init__.py
@@ -22,5 +22,6 @@
from .tool_param_definition import ToolParamDefinition as ToolParamDefinition
from .tool_response_message import ToolResponseMessage as ToolResponseMessage
from .query_generator_config import QueryGeneratorConfig as QueryGeneratorConfig
+from .shared_token_log_probs import SharedTokenLogProbs as SharedTokenLogProbs
from .chat_completion_response import ChatCompletionResponse as ChatCompletionResponse
from .interleaved_content_item import InterleavedContentItem as InterleavedContentItem
diff --git a/src/llama_stack_client/types/shared/agent_config.py b/src/llama_stack_client/types/shared/agent_config.py
index eb116159..ba18c52b 100644
--- a/src/llama_stack_client/types/shared/agent_config.py
+++ b/src/llama_stack_client/types/shared/agent_config.py
@@ -4,11 +4,48 @@
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
-from ..tool_def import ToolDef
from .response_format import ResponseFormat
from .sampling_params import SamplingParams
-__all__ = ["AgentConfig", "ToolConfig", "Toolgroup", "ToolgroupAgentToolGroupWithArgs"]
+__all__ = [
+ "AgentConfig",
+ "ClientTool",
+ "ClientToolParameter",
+ "ToolConfig",
+ "Toolgroup",
+ "ToolgroupAgentToolGroupWithArgs",
+]
+
+
+class ClientToolParameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ClientTool(BaseModel):
+ name: str
+ """Name of the tool"""
+
+ description: Optional[str] = None
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Optional[List[ClientToolParameter]] = None
+ """(Optional) List of parameters this tool accepts"""
class ToolConfig(BaseModel):
@@ -56,7 +93,7 @@ class AgentConfig(BaseModel):
model: str
"""The model identifier to use for the agent"""
- client_tools: Optional[List[ToolDef]] = None
+ client_tools: Optional[List[ClientTool]] = None
enable_session_persistence: Optional[bool] = None
"""Optional flag indicating whether session data has to be persisted"""
diff --git a/src/llama_stack_client/types/shared/batch_completion.py b/src/llama_stack_client/types/shared/batch_completion.py
index 43a0a735..450861b8 100644
--- a/src/llama_stack_client/types/shared/batch_completion.py
+++ b/src/llama_stack_client/types/shared/batch_completion.py
@@ -1,13 +1,39 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
+from typing import List, Optional
+from typing_extensions import Literal
from ..._models import BaseModel
-from ..completion_response import CompletionResponse
+from .shared_token_log_probs import SharedTokenLogProbs
-__all__ = ["BatchCompletion"]
+__all__ = ["BatchCompletion", "Batch", "BatchMetric"]
+
+
+class BatchMetric(BaseModel):
+ metric: str
+ """The name of the metric"""
+
+ value: float
+ """The numeric value of the metric"""
+
+ unit: Optional[str] = None
+ """(Optional) The unit of measurement for the metric value"""
+
+
+class Batch(BaseModel):
+ content: str
+ """The generated completion text"""
+
+ stop_reason: Literal["end_of_turn", "end_of_message", "out_of_tokens"]
+ """Reason why generation stopped"""
+
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
+ """Optional log probabilities for generated tokens"""
+
+ metrics: Optional[List[BatchMetric]] = None
+ """(Optional) List of metrics associated with the API response"""
class BatchCompletion(BaseModel):
- batch: List[CompletionResponse]
+ batch: List[Batch]
"""List of completion responses, one for each input in the batch"""
diff --git a/src/llama_stack_client/types/shared/chat_completion_response.py b/src/llama_stack_client/types/shared/chat_completion_response.py
index 3ff6e0bc..83519944 100644
--- a/src/llama_stack_client/types/shared/chat_completion_response.py
+++ b/src/llama_stack_client/types/shared/chat_completion_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from ..token_log_probs import TokenLogProbs
from .completion_message import CompletionMessage
+from .shared_token_log_probs import SharedTokenLogProbs
__all__ = ["ChatCompletionResponse", "Metric"]
@@ -24,7 +24,7 @@ class ChatCompletionResponse(BaseModel):
completion_message: CompletionMessage
"""The complete response message"""
- logprobs: Optional[List[TokenLogProbs]] = None
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
"""Optional log probabilities for generated tokens"""
metrics: Optional[List[Metric]] = None
diff --git a/src/llama_stack_client/types/token_log_probs.py b/src/llama_stack_client/types/shared/shared_token_log_probs.py
similarity index 67%
rename from src/llama_stack_client/types/token_log_probs.py
rename to src/llama_stack_client/types/shared/shared_token_log_probs.py
index b1a0a2b4..904e69e3 100644
--- a/src/llama_stack_client/types/token_log_probs.py
+++ b/src/llama_stack_client/types/shared/shared_token_log_probs.py
@@ -2,11 +2,11 @@
from typing import Dict
-from .._models import BaseModel
+from ..._models import BaseModel
-__all__ = ["TokenLogProbs"]
+__all__ = ["SharedTokenLogProbs"]
-class TokenLogProbs(BaseModel):
+class SharedTokenLogProbs(BaseModel):
logprobs_by_token: Dict[str, float]
"""Dictionary mapping tokens to their log probabilities"""
diff --git a/src/llama_stack_client/types/shared_params/agent_config.py b/src/llama_stack_client/types/shared_params/agent_config.py
index 5cebec3f..676ec749 100644
--- a/src/llama_stack_client/types/shared_params/agent_config.py
+++ b/src/llama_stack_client/types/shared_params/agent_config.py
@@ -5,11 +5,48 @@
from typing import Dict, List, Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from ..tool_def_param import ToolDefParam
from .response_format import ResponseFormat
from .sampling_params import SamplingParams
-__all__ = ["AgentConfig", "ToolConfig", "Toolgroup", "ToolgroupAgentToolGroupWithArgs"]
+__all__ = [
+ "AgentConfig",
+ "ClientTool",
+ "ClientToolParameter",
+ "ToolConfig",
+ "Toolgroup",
+ "ToolgroupAgentToolGroupWithArgs",
+]
+
+
+class ClientToolParameter(TypedDict, total=False):
+ description: Required[str]
+ """Human-readable description of what the parameter does"""
+
+ name: Required[str]
+ """Name of the parameter"""
+
+ parameter_type: Required[str]
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: Required[bool]
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, Iterable[object], object, None]
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ClientTool(TypedDict, total=False):
+ name: Required[str]
+ """Name of the tool"""
+
+ description: str
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Iterable[ClientToolParameter]
+ """(Optional) List of parameters this tool accepts"""
class ToolConfig(TypedDict, total=False):
@@ -57,7 +94,7 @@ class AgentConfig(TypedDict, total=False):
model: Required[str]
"""The model identifier to use for the agent"""
- client_tools: Iterable[ToolDefParam]
+ client_tools: Iterable[ClientTool]
enable_session_persistence: bool
"""Optional flag indicating whether session data has to be persisted"""
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
deleted file mode 100644
index c82a9b8a..00000000
--- a/src/llama_stack_client/types/tool_def.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ToolDef", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
-
-class ToolDef(BaseModel):
- name: str
- """Name of the tool"""
-
- description: Optional[str] = None
- """(Optional) Human-readable description of what the tool does"""
-
- metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Additional metadata about the tool"""
-
- parameters: Optional[List[Parameter]] = None
- """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
deleted file mode 100644
index 93ad8285..00000000
--- a/src/llama_stack_client/types/tool_def_param.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolDefParam", "Parameter"]
-
-
-class Parameter(TypedDict, total=False):
- description: Required[str]
- """Human-readable description of what the parameter does"""
-
- name: Required[str]
- """Name of the parameter"""
-
- parameter_type: Required[str]
- """Type of the parameter (e.g., string, integer)"""
-
- required: Required[bool]
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, Iterable[object], object, None]
- """(Optional) Default value for the parameter if not provided"""
-
-
-class ToolDefParam(TypedDict, total=False):
- name: Required[str]
- """Name of the tool"""
-
- description: str
- """(Optional) Human-readable description of what the tool does"""
-
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) Additional metadata about the tool"""
-
- parameters: Iterable[Parameter]
- """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_response.py b/src/llama_stack_client/types/tool_runtime_list_tools_response.py
index cd65754f..9982d1d7 100644
--- a/src/llama_stack_client/types/tool_runtime_list_tools_response.py
+++ b/src/llama_stack_client/types/tool_runtime_list_tools_response.py
@@ -1,10 +1,46 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
+from typing import Dict, List, Union, Optional
from typing_extensions import TypeAlias
-from .tool_def import ToolDef
+from .._models import BaseModel
-__all__ = ["ToolRuntimeListToolsResponse"]
+__all__ = [
+ "ToolRuntimeListToolsResponse",
+ "ToolRuntimeListToolsResponseItem",
+ "ToolRuntimeListToolsResponseItemParameter",
+]
-ToolRuntimeListToolsResponse: TypeAlias = List[ToolDef]
+
+class ToolRuntimeListToolsResponseItemParameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ToolRuntimeListToolsResponseItem(BaseModel):
+ name: str
+ """Name of the tool"""
+
+ description: Optional[str] = None
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Optional[List[ToolRuntimeListToolsResponseItemParameter]] = None
+ """(Optional) List of parameters this tool accepts"""
+
+
+ToolRuntimeListToolsResponse: TypeAlias = List[ToolRuntimeListToolsResponseItem]
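
Consumers of the runtime tool listing now receive the inline item model instead of `ToolDef`. A small sketch of reading it; the `client.tool_runtime.list_tools()` call follows the generated resource naming, and any filter parameters are left out since their exact names are not shown here:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

tools = client.tool_runtime.list_tools()  # ToolRuntimeListToolsResponse, a list of items
for tool in tools:
    required = [p.name for p in (tool.parameters or []) if p.required]
    print(tool.name, tool.description, required)
```
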
diff --git a/tests/api_resources/test_inference.py b/tests/api_resources/test_inference.py
index d5ef46d1..59557a5e 100644
--- a/tests/api_resources/test_inference.py
+++ b/tests/api_resources/test_inference.py
@@ -10,11 +10,11 @@
from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
from llama_stack_client.types import (
- CompletionResponse,
EmbeddingsResponse,
InferenceBatchChatCompletionResponse,
)
from llama_stack_client.types.shared import BatchCompletion, ChatCompletionResponse
+from llama_stack_client.types.inference_completion_params import UnnamedTypeWithNoPropertyInfoOrParent0
# pyright: reportDeprecated=false
@@ -392,7 +392,7 @@ def test_method_completion_overload_1(self, client: LlamaStackClient) -> None:
model_id="model_id",
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_method_completion_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
@@ -414,7 +414,7 @@ def test_method_completion_with_all_params_overload_1(self, client: LlamaStackCl
stream=False,
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> None:
@@ -427,7 +427,7 @@ def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> N
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_streaming_response_completion_overload_1(self, client: LlamaStackClient) -> None:
@@ -440,7 +440,7 @@ def test_streaming_response_completion_overload_1(self, client: LlamaStackClient
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -931,7 +931,7 @@ async def test_method_completion_overload_1(self, async_client: AsyncLlamaStackC
model_id="model_id",
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_method_completion_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -953,7 +953,7 @@ async def test_method_completion_with_all_params_overload_1(self, async_client:
stream=False,
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_raw_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -966,7 +966,7 @@ async def test_raw_response_completion_overload_1(self, async_client: AsyncLlama
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = await response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_streaming_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -979,7 +979,7 @@ async def test_streaming_response_completion_overload_1(self, async_client: Asyn
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = await response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
assert cast(Any, response.is_closed) is True
From af6b97e6ec55473a03682ea45e4bac9429fbdf78 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 05:35:22 +0000
Subject: [PATCH 2/5] chore: update github action
---
.github/workflows/ci.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ee914c4e..2c0a0c04 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.repository == 'stainless-sdks/llama-stack-client-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
timeout-minutes: 10
name: build
permissions:
@@ -61,12 +61,14 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
+ if: github.repository == 'stainless-sdks/llama-stack-client-python'
id: github-oidc
uses: actions/github-script@v6
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
+ if: github.repository == 'stainless-sdks/llama-stack-client-python'
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
From c935c79c1117613c7e9413b87d19cfd010d89796 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 22:54:20 +0000
Subject: [PATCH 3/5] feat(api): query_metrics, batches, changes
---
.stats.yml | 4 ++--
src/llama_stack_client/resources/files.py | 8 ++++----
src/llama_stack_client/types/create_response.py | 8 +-------
src/llama_stack_client/types/file.py | 2 +-
src/llama_stack_client/types/file_create_params.py | 2 +-
src/llama_stack_client/types/file_list_params.py | 2 +-
6 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index b90091ba..db3fa118 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 106
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4f6633567c1a079df49d0cf58f37251a4bb0ee2f2a496ac83c9fee26eb325f9c.yml
-openapi_spec_hash: af5b3d3bbecf48f15c90b982ccac852e
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
+openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
config_hash: ddcbd66d7ac80290da208232a746e30f
diff --git a/src/llama_stack_client/resources/files.py b/src/llama_stack_client/resources/files.py
index 96c5c871..4a74e148 100644
--- a/src/llama_stack_client/resources/files.py
+++ b/src/llama_stack_client/resources/files.py
@@ -50,7 +50,7 @@ def create(
self,
*,
file: FileTypes,
- purpose: Literal["assistants"],
+ purpose: Literal["assistants", "batch"],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -137,7 +137,7 @@ def list(
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: Literal["assistants"] | NotGiven = NOT_GIVEN,
+ purpose: Literal["assistants", "batch"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -282,7 +282,7 @@ async def create(
self,
*,
file: FileTypes,
- purpose: Literal["assistants"],
+ purpose: Literal["assistants", "batch"],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -369,7 +369,7 @@ def list(
after: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- purpose: Literal["assistants"] | NotGiven = NOT_GIVEN,
+ purpose: Literal["assistants", "batch"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
diff --git a/src/llama_stack_client/types/create_response.py b/src/llama_stack_client/types/create_response.py
index b0eaf3e5..fbb519f0 100644
--- a/src/llama_stack_client/types/create_response.py
+++ b/src/llama_stack_client/types/create_response.py
@@ -22,13 +22,7 @@ class Result(BaseModel):
"""
category_scores: Optional[Dict[str, float]] = None
- """A list of the categories along with their scores as predicted by model.
-
- Required set of categories that need to be in response - violence -
- violence/graphic - harassment - harassment/threatening - hate -
- hate/threatening - illicit - illicit/violent - sexual - sexual/minors -
- self-harm - self-harm/intent - self-harm/instructions
- """
+ """A list of the categories along with their scores as predicted by model."""
user_message: Optional[str] = None
diff --git a/src/llama_stack_client/types/file.py b/src/llama_stack_client/types/file.py
index 74ead6b7..b87d04ef 100644
--- a/src/llama_stack_client/types/file.py
+++ b/src/llama_stack_client/types/file.py
@@ -26,5 +26,5 @@ class File(BaseModel):
object: Literal["file"]
"""The object type, which is always "file" """
- purpose: Literal["assistants"]
+ purpose: Literal["assistants", "batch"]
"""The intended purpose of the file"""
diff --git a/src/llama_stack_client/types/file_create_params.py b/src/llama_stack_client/types/file_create_params.py
index 8342aad2..8322c0a9 100644
--- a/src/llama_stack_client/types/file_create_params.py
+++ b/src/llama_stack_client/types/file_create_params.py
@@ -12,5 +12,5 @@
class FileCreateParams(TypedDict, total=False):
file: Required[FileTypes]
- purpose: Required[Literal["assistants"]]
+ purpose: Required[Literal["assistants", "batch"]]
"""Valid purpose values for OpenAI Files API."""
diff --git a/src/llama_stack_client/types/file_list_params.py b/src/llama_stack_client/types/file_list_params.py
index 3f7d6ed5..2b504e53 100644
--- a/src/llama_stack_client/types/file_list_params.py
+++ b/src/llama_stack_client/types/file_list_params.py
@@ -29,5 +29,5 @@ class FileListParams(TypedDict, total=False):
`asc` for ascending order and `desc` for descending order.
"""
- purpose: Literal["assistants"]
+ purpose: Literal["assistants", "batch"]
"""Only return files with the given purpose."""
From 8f0f7a5de82f1dd3404cedff599b8a33f6e5c755 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 23:00:10 +0000
Subject: [PATCH 4/5] feat(api): some updates to query metrics
---
.stats.yml | 4 +-
api.md | 3 +
src/llama_stack_client/resources/telemetry.py | 139 ++++++++++++++++++
src/llama_stack_client/types/__init__.py | 3 +
.../chat_completion_response_stream_chunk.py | 14 +-
src/llama_stack_client/types/metric.py | 18 +++
.../types/shared/batch_completion.py | 16 +-
.../types/shared/chat_completion_response.py | 14 +-
.../types/telemetry_query_metrics_params.py | 36 +++++
.../types/telemetry_query_metrics_response.py | 45 ++++++
tests/api_resources/test_telemetry.py | 139 ++++++++++++++++++
11 files changed, 392 insertions(+), 39 deletions(-)
create mode 100644 src/llama_stack_client/types/metric.py
create mode 100644 src/llama_stack_client/types/telemetry_query_metrics_params.py
create mode 100644 src/llama_stack_client/types/telemetry_query_metrics_response.py
diff --git a/.stats.yml b/.stats.yml
index db3fa118..ef192463 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 106
+configured_endpoints: 107
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-f252873ea1e1f38fd207331ef2621c511154d5be3f4076e59cc15754fc58eee4.yml
openapi_spec_hash: 10cbb4337a06a9fdd7d08612dd6044c3
-config_hash: ddcbd66d7ac80290da208232a746e30f
+config_hash: 17fe64b23723fc54f2ee61c80223c3e3
diff --git a/api.md b/api.md
index 4f4c9180..9fba7963 100644
--- a/api.md
+++ b/api.md
@@ -509,12 +509,14 @@ Types:
```python
from llama_stack_client.types import (
Event,
+ Metric,
QueryCondition,
QuerySpansResponse,
SpanWithStatus,
Trace,
TelemetryGetSpanResponse,
TelemetryGetSpanTreeResponse,
+ TelemetryQueryMetricsResponse,
TelemetryQuerySpansResponse,
TelemetryQueryTracesResponse,
)
@@ -526,6 +528,7 @@ Methods:
- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
- client.telemetry.get_trace(trace_id) -> Trace
- client.telemetry.log_event(\*\*params) -> None
+- client.telemetry.query_metrics(metric_name, \*\*params) -> TelemetryQueryMetricsResponse
- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
- client.telemetry.save_spans_to_dataset(\*\*params) -> None
diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py
index 1cec537e..b8b47c49 100644
--- a/src/llama_stack_client/resources/telemetry.py
+++ b/src/llama_stack_client/resources/telemetry.py
@@ -3,6 +3,7 @@
from __future__ import annotations
from typing import List, Type, Iterable, cast
+from typing_extensions import Literal
import httpx
@@ -11,6 +12,7 @@
telemetry_query_spans_params,
telemetry_query_traces_params,
telemetry_get_span_tree_params,
+ telemetry_query_metrics_params,
telemetry_save_spans_to_dataset_params,
)
from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
@@ -32,6 +34,7 @@
from ..types.telemetry_query_spans_response import TelemetryQuerySpansResponse
from ..types.telemetry_query_traces_response import TelemetryQueryTracesResponse
from ..types.telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse
+from ..types.telemetry_query_metrics_response import TelemetryQueryMetricsResponse
__all__ = ["TelemetryResource", "AsyncTelemetryResource"]
@@ -219,6 +222,68 @@ def log_event(
cast_to=NoneType,
)
+ def query_metrics(
+ self,
+ metric_name: str,
+ *,
+ query_type: Literal["range", "instant"],
+ start_time: int,
+ end_time: int | NotGiven = NOT_GIVEN,
+ granularity: str | NotGiven = NOT_GIVEN,
+ label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> TelemetryQueryMetricsResponse:
+ """
+ Query metrics.
+
+ Args:
+ query_type: The type of query to perform.
+
+ start_time: The start time of the metric to query.
+
+ end_time: The end time of the metric to query.
+
+ granularity: The granularity of the metric to query.
+
+ label_matchers: The label matchers to apply to the metric.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not metric_name:
+ raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
+ return self._post(
+ f"/v1/telemetry/metrics/{metric_name}",
+ body=maybe_transform(
+ {
+ "query_type": query_type,
+ "start_time": start_time,
+ "end_time": end_time,
+ "granularity": granularity,
+ "label_matchers": label_matchers,
+ },
+ telemetry_query_metrics_params.TelemetryQueryMetricsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
+ )
+
def query_spans(
self,
*,
@@ -561,6 +626,68 @@ async def log_event(
cast_to=NoneType,
)
+ async def query_metrics(
+ self,
+ metric_name: str,
+ *,
+ query_type: Literal["range", "instant"],
+ start_time: int,
+ end_time: int | NotGiven = NOT_GIVEN,
+ granularity: str | NotGiven = NOT_GIVEN,
+ label_matchers: Iterable[telemetry_query_metrics_params.LabelMatcher] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> TelemetryQueryMetricsResponse:
+ """
+ Query metrics.
+
+ Args:
+ query_type: The type of query to perform.
+
+ start_time: The start time of the metric to query.
+
+ end_time: The end time of the metric to query.
+
+ granularity: The granularity of the metric to query.
+
+ label_matchers: The label matchers to apply to the metric.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not metric_name:
+ raise ValueError(f"Expected a non-empty value for `metric_name` but received {metric_name!r}")
+ return await self._post(
+ f"/v1/telemetry/metrics/{metric_name}",
+ body=await async_maybe_transform(
+ {
+ "query_type": query_type,
+ "start_time": start_time,
+ "end_time": end_time,
+ "granularity": granularity,
+ "label_matchers": label_matchers,
+ },
+ telemetry_query_metrics_params.TelemetryQueryMetricsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ post_parser=DataWrapper[TelemetryQueryMetricsResponse]._unwrapper,
+ ),
+ cast_to=cast(Type[TelemetryQueryMetricsResponse], DataWrapper[TelemetryQueryMetricsResponse]),
+ )
+
async def query_spans(
self,
*,
@@ -736,6 +863,9 @@ def __init__(self, telemetry: TelemetryResource) -> None:
self.log_event = to_raw_response_wrapper(
telemetry.log_event,
)
+ self.query_metrics = to_raw_response_wrapper(
+ telemetry.query_metrics,
+ )
self.query_spans = to_raw_response_wrapper(
telemetry.query_spans,
)
@@ -763,6 +893,9 @@ def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self.log_event = async_to_raw_response_wrapper(
telemetry.log_event,
)
+ self.query_metrics = async_to_raw_response_wrapper(
+ telemetry.query_metrics,
+ )
self.query_spans = async_to_raw_response_wrapper(
telemetry.query_spans,
)
@@ -790,6 +923,9 @@ def __init__(self, telemetry: TelemetryResource) -> None:
self.log_event = to_streamed_response_wrapper(
telemetry.log_event,
)
+ self.query_metrics = to_streamed_response_wrapper(
+ telemetry.query_metrics,
+ )
self.query_spans = to_streamed_response_wrapper(
telemetry.query_spans,
)
@@ -817,6 +953,9 @@ def __init__(self, telemetry: AsyncTelemetryResource) -> None:
self.log_event = async_to_streamed_response_wrapper(
telemetry.log_event,
)
+ self.query_metrics = async_to_streamed_response_wrapper(
+ telemetry.query_metrics,
+ )
self.query_spans = async_to_streamed_response_wrapper(
telemetry.query_spans,
)
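
A usage sketch of the new endpoint, mirroring the parameters exercised by the generated tests further down; the metric name, timestamps, and label values are placeholders:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient()

series = client.telemetry.query_metrics(
    metric_name="prompt_tokens",  # placeholder metric name
    query_type="range",
    start_time=1724300000,        # unix seconds, placeholder
    end_time=1724386400,
    granularity="1d",
    label_matchers=[{"name": "model_id", "operator": "=", "value": "my-model"}],
)
for item in series:
    print(item.metric, len(item.values))
```
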
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 06561221..0ab7f506 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -7,6 +7,7 @@
from .tool import Tool as Tool
from .model import Model as Model
from .trace import Trace as Trace
+from .metric import Metric as Metric
from .shared import (
Message as Message,
Document as Document,
@@ -142,6 +143,7 @@
from .telemetry_query_traces_params import TelemetryQueryTracesParams as TelemetryQueryTracesParams
from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse
from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams
+from .telemetry_query_metrics_params import TelemetryQueryMetricsParams as TelemetryQueryMetricsParams
from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse
from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams
from .eval_evaluate_rows_alpha_params import EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams
@@ -152,6 +154,7 @@
from .list_post_training_jobs_response import ListPostTrainingJobsResponse as ListPostTrainingJobsResponse
from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse
+from .telemetry_query_metrics_response import TelemetryQueryMetricsResponse as TelemetryQueryMetricsResponse
from .tool_runtime_list_tools_response import ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse
from .inference_batch_completion_params import InferenceBatchCompletionParams as InferenceBatchCompletionParams
from .synthetic_data_generation_response import SyntheticDataGenerationResponse as SyntheticDataGenerationResponse
diff --git a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
index 3d4ac9a5..be9d49ba 100644
--- a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
@@ -3,11 +3,12 @@
from typing import List, Optional
from typing_extensions import Literal
+from .metric import Metric
from .._models import BaseModel
from .shared.content_delta import ContentDelta
from .shared.shared_token_log_probs import SharedTokenLogProbs
-__all__ = ["ChatCompletionResponseStreamChunk", "Event", "Metric"]
+__all__ = ["ChatCompletionResponseStreamChunk", "Event"]
class Event(BaseModel):
@@ -27,17 +28,6 @@ class Event(BaseModel):
"""Optional reason why generation stopped, if complete"""
-class Metric(BaseModel):
- metric: str
- """The name of the metric"""
-
- value: float
- """The numeric value of the metric"""
-
- unit: Optional[str] = None
- """(Optional) The unit of measurement for the metric value"""
-
-
class ChatCompletionResponseStreamChunk(BaseModel):
event: Event
"""The event containing the new content"""
diff --git a/src/llama_stack_client/types/metric.py b/src/llama_stack_client/types/metric.py
new file mode 100644
index 00000000..eb6fab3c
--- /dev/null
+++ b/src/llama_stack_client/types/metric.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["Metric"]
+
+
+class Metric(BaseModel):
+ metric: str
+ """The name of the metric"""
+
+ value: float
+ """The numeric value of the metric"""
+
+ unit: Optional[str] = None
+ """(Optional) The unit of measurement for the metric value"""
diff --git a/src/llama_stack_client/types/shared/batch_completion.py b/src/llama_stack_client/types/shared/batch_completion.py
index 450861b8..0a0790fb 100644
--- a/src/llama_stack_client/types/shared/batch_completion.py
+++ b/src/llama_stack_client/types/shared/batch_completion.py
@@ -3,21 +3,11 @@
from typing import List, Optional
from typing_extensions import Literal
+from ..metric import Metric
from ..._models import BaseModel
from .shared_token_log_probs import SharedTokenLogProbs
-__all__ = ["BatchCompletion", "Batch", "BatchMetric"]
-
-
-class BatchMetric(BaseModel):
- metric: str
- """The name of the metric"""
-
- value: float
- """The numeric value of the metric"""
-
- unit: Optional[str] = None
- """(Optional) The unit of measurement for the metric value"""
+__all__ = ["BatchCompletion", "Batch"]
class Batch(BaseModel):
@@ -30,7 +20,7 @@ class Batch(BaseModel):
logprobs: Optional[List[SharedTokenLogProbs]] = None
"""Optional log probabilities for generated tokens"""
- metrics: Optional[List[BatchMetric]] = None
+ metrics: Optional[List[Metric]] = None
"""(Optional) List of metrics associated with the API response"""
diff --git a/src/llama_stack_client/types/shared/chat_completion_response.py b/src/llama_stack_client/types/shared/chat_completion_response.py
index 83519944..a7164071 100644
--- a/src/llama_stack_client/types/shared/chat_completion_response.py
+++ b/src/llama_stack_client/types/shared/chat_completion_response.py
@@ -2,22 +2,12 @@
from typing import List, Optional
+from ..metric import Metric
from ..._models import BaseModel
from .completion_message import CompletionMessage
from .shared_token_log_probs import SharedTokenLogProbs
-__all__ = ["ChatCompletionResponse", "Metric"]
-
-
-class Metric(BaseModel):
- metric: str
- """The name of the metric"""
-
- value: float
- """The numeric value of the metric"""
-
- unit: Optional[str] = None
- """(Optional) The unit of measurement for the metric value"""
+__all__ = ["ChatCompletionResponse"]
class ChatCompletionResponse(BaseModel):
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_params.py b/src/llama_stack_client/types/telemetry_query_metrics_params.py
new file mode 100644
index 00000000..adf3f720
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_metrics_params.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["TelemetryQueryMetricsParams", "LabelMatcher"]
+
+
+class TelemetryQueryMetricsParams(TypedDict, total=False):
+ query_type: Required[Literal["range", "instant"]]
+ """The type of query to perform."""
+
+ start_time: Required[int]
+ """The start time of the metric to query."""
+
+ end_time: int
+ """The end time of the metric to query."""
+
+ granularity: str
+ """The granularity of the metric to query."""
+
+ label_matchers: Iterable[LabelMatcher]
+ """The label matchers to apply to the metric."""
+
+
+class LabelMatcher(TypedDict, total=False):
+ name: Required[str]
+ """The name of the label to match"""
+
+ operator: Required[Literal["=", "!=", "=~", "!~"]]
+ """The comparison operator to use for matching"""
+
+ value: Required[str]
+ """The value to match against"""
diff --git a/src/llama_stack_client/types/telemetry_query_metrics_response.py b/src/llama_stack_client/types/telemetry_query_metrics_response.py
new file mode 100644
index 00000000..e9f4264e
--- /dev/null
+++ b/src/llama_stack_client/types/telemetry_query_metrics_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .._models import BaseModel
+
+__all__ = [
+ "TelemetryQueryMetricsResponse",
+ "TelemetryQueryMetricsResponseItem",
+ "TelemetryQueryMetricsResponseItemLabel",
+ "TelemetryQueryMetricsResponseItemValue",
+]
+
+
+class TelemetryQueryMetricsResponseItemLabel(BaseModel):
+ name: str
+ """The name of the label"""
+
+ value: str
+ """The value of the label"""
+
+
+class TelemetryQueryMetricsResponseItemValue(BaseModel):
+ timestamp: int
+ """Unix timestamp when the metric value was recorded"""
+
+ unit: str
+
+ value: float
+ """The numeric value of the metric at this timestamp"""
+
+
+class TelemetryQueryMetricsResponseItem(BaseModel):
+ labels: List[TelemetryQueryMetricsResponseItemLabel]
+ """List of labels associated with this metric series"""
+
+ metric: str
+ """The name of the metric"""
+
+ values: List[TelemetryQueryMetricsResponseItemValue]
+ """List of data points in chronological order"""
+
+
+TelemetryQueryMetricsResponse: TypeAlias = List[TelemetryQueryMetricsResponseItem]
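
Given the nested shape defined above, a small helper sketch that flattens a response into rows for tabular processing; the helper name is just for illustration:

```python
from typing import List, Tuple

from llama_stack_client.types import TelemetryQueryMetricsResponse


def flatten_metrics(series: TelemetryQueryMetricsResponse) -> List[Tuple[str, str, int, float]]:
    rows: List[Tuple[str, str, int, float]] = []
    for item in series:
        labels = ",".join(f"{label.name}={label.value}" for label in item.labels)
        for point in item.values:
            rows.append((item.metric, labels, point.timestamp, point.value))
    return rows
```
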
diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py
index 14a8801c..ea123787 100644
--- a/tests/api_resources/test_telemetry.py
+++ b/tests/api_resources/test_telemetry.py
@@ -15,6 +15,7 @@
TelemetryQuerySpansResponse,
TelemetryGetSpanTreeResponse,
TelemetryQueryTracesResponse,
+ TelemetryQueryMetricsResponse,
)
from llama_stack_client._utils import parse_datetime
@@ -228,6 +229,75 @@ def test_streaming_response_log_event(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_metrics(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_method_query_metrics_with_all_params(self, client: LlamaStackClient) -> None:
+ telemetry = client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ end_time=0,
+ granularity="granularity",
+ label_matchers=[
+ {
+ "name": "name",
+ "operator": "=",
+ "value": "value",
+ }
+ ],
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_raw_response_query_metrics(self, client: LlamaStackClient) -> None:
+ response = client.telemetry.with_raw_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_streaming_response_query_metrics(self, client: LlamaStackClient) -> None:
+ with client.telemetry.with_streaming_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ def test_path_params_query_metrics(self, client: LlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
+ client.telemetry.with_raw_response.query_metrics(
+ metric_name="",
+ query_type="range",
+ start_time=0,
+ )
+
@pytest.mark.skip(reason="unsupported query params in java / kotlin")
@parametrize
def test_method_query_spans(self, client: LlamaStackClient) -> None:
@@ -625,6 +695,75 @@ async def test_streaming_response_log_event(self, async_client: AsyncLlamaStackC
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_method_query_metrics_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ telemetry = await async_client.telemetry.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ end_time=0,
+ granularity="granularity",
+ label_matchers=[
+ {
+ "name": "name",
+ "operator": "=",
+ "value": "value",
+ }
+ ],
+ )
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_raw_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.telemetry.with_raw_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_streaming_response_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.telemetry.with_streaming_response.query_metrics(
+ metric_name="metric_name",
+ query_type="range",
+ start_time=0,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ telemetry = await response.parse()
+ assert_matches_type(TelemetryQueryMetricsResponse, telemetry, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="unsupported query params in java / kotlin")
+ @parametrize
+ async def test_path_params_query_metrics(self, async_client: AsyncLlamaStackClient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `metric_name` but received ''"):
+ await async_client.telemetry.with_raw_response.query_metrics(
+ metric_name="",
+ query_type="range",
+ start_time=0,
+ )
+
@pytest.mark.skip(reason="unsupported query params in java / kotlin")
@parametrize
async def test_method_query_spans(self, async_client: AsyncLlamaStackClient) -> None:
From 0af39e63d12c10a5ac408f829fe07f6af0583bd8 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 23:10:05 +0000
Subject: [PATCH 5/5] release: 0.2.19-alpha.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 26 ++++++++++++++++++++++++++
pyproject.toml | 2 +-
3 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index bce5b8a9..23b4f02c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.18-alpha.3"
+ ".": "0.2.19-alpha.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06c78082..c084bf81 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,31 @@
# Changelog
+## 0.2.19-alpha.1 (2025-08-22)
+
+Full Changelog: [v0.2.18-alpha.3...v0.2.19-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.3...v0.2.19-alpha.1)
+
+### Features
+
+* **api:** manual updates ([119bdb2](https://github.com/llamastack/llama-stack-client-python/commit/119bdb2a862fe772ca82770937aba49ffb039bf2))
+* **api:** query_metrics, batches, changes ([c935c79](https://github.com/llamastack/llama-stack-client-python/commit/c935c79c1117613c7e9413b87d19cfd010d89796))
+* **api:** some updates to query metrics ([8f0f7a5](https://github.com/llamastack/llama-stack-client-python/commit/8f0f7a5de82f1dd3404cedff599b8a33f6e5c755))
+
+
+### Bug Fixes
+
+* **agent:** fix wrong module import in ReAct agent ([#262](https://github.com/llamastack/llama-stack-client-python/issues/262)) ([c17f3d6](https://github.com/llamastack/llama-stack-client-python/commit/c17f3d65af17d282785623864661ef2d16fcb1fc)), closes [#261](https://github.com/llamastack/llama-stack-client-python/issues/261)
+* **build:** kill explicit listing of python3.13 for now ([5284b4a](https://github.com/llamastack/llama-stack-client-python/commit/5284b4a93822e8900c05f63ddf342aab3b603aa3))
+
+
+### Chores
+
+* update github action ([af6b97e](https://github.com/llamastack/llama-stack-client-python/commit/af6b97e6ec55473a03682ea45e4bac9429fbdf78))
+
+
+### Build System
+
+* Bump version to 0.2.18 ([53d95ba](https://github.com/llamastack/llama-stack-client-python/commit/53d95bad01e4aaa8fa27438618aaa6082cd60275))
+
## 0.2.18-alpha.3 (2025-08-14)
Full Changelog: [v0.2.18-alpha.2...v0.2.18-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.2...v0.2.18-alpha.3)
diff --git a/pyproject.toml b/pyproject.toml
index f0a1927f..ab7bfcf9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.2.18"
+version = "0.2.19-alpha.1"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"