From 119bdb2a862fe772ca82770937aba49ffb039bf2 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 21 Aug 2025 19:13:04 +0000
Subject: [PATCH 1/3] feat(api): manual updates
---
.stats.yml | 2 +-
api.md | 6 +--
src/llama_stack_client/pagination.py | 18 +++++---
src/llama_stack_client/resources/inference.py | 26 +++++------
src/llama_stack_client/types/__init__.py | 5 +--
.../chat_completion_response_stream_chunk.py | 4 +-
.../types/completion_response.py | 34 --------------
.../types/shared/__init__.py | 1 +
.../types/shared/agent_config.py | 43 ++++++++++++++++--
.../types/shared/batch_completion.py | 34 ++++++++++++--
.../types/shared/chat_completion_response.py | 4 +-
.../shared_token_log_probs.py} | 6 +--
.../types/shared_params/agent_config.py | 43 ++++++++++++++++--
src/llama_stack_client/types/tool_def.py | 38 ----------------
.../types/tool_def_param.py | 39 ----------------
.../types/tool_runtime_list_tools_response.py | 44 +++++++++++++++++--
tests/api_resources/test_inference.py | 18 ++++----
17 files changed, 197 insertions(+), 168 deletions(-)
delete mode 100644 src/llama_stack_client/types/completion_response.py
rename src/llama_stack_client/types/{token_log_probs.py => shared/shared_token_log_probs.py} (67%)
delete mode 100644 src/llama_stack_client/types/tool_def.py
delete mode 100644 src/llama_stack_client/types/tool_def_param.py
diff --git a/.stats.yml b/.stats.yml
index cd6eb84a..b90091ba 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 106
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4f6633567c1a079df49d0cf58f37251a4bb0ee2f2a496ac83c9fee26eb325f9c.yml
openapi_spec_hash: af5b3d3bbecf48f15c90b982ccac852e
-config_hash: e67fd054e95c1e82f78f4b834e96bb65
+config_hash: ddcbd66d7ac80290da208232a746e30f
diff --git a/api.md b/api.md
index 4056f4a6..4f4c9180 100644
--- a/api.md
+++ b/api.md
@@ -20,6 +20,7 @@ from llama_stack_client.types import (
SafetyViolation,
SamplingParams,
ScoringResult,
+ SharedTokenLogProbs,
SystemMessage,
ToolCall,
ToolCallOrString,
@@ -62,7 +63,7 @@ Methods:
Types:
```python
-from llama_stack_client.types import ToolDef, ToolInvocationResult, ToolRuntimeListToolsResponse
+from llama_stack_client.types import ToolInvocationResult, ToolRuntimeListToolsResponse
```
Methods:
@@ -239,7 +240,6 @@ Types:
```python
from llama_stack_client.types import (
ChatCompletionResponseStreamChunk,
- CompletionResponse,
EmbeddingsResponse,
TokenLogProbs,
InferenceBatchChatCompletionResponse,
@@ -251,7 +251,7 @@ Methods:
- client.inference.batch_chat_completion(\*\*params) -> InferenceBatchChatCompletionResponse
- client.inference.batch_completion(\*\*params) -> BatchCompletion
- client.inference.chat_completion(\*\*params) -> ChatCompletionResponse
-- client.inference.completion(\*\*params) -> CompletionResponse
+- client.inference.completion(\*\*params) -> UnnamedTypeWithNoPropertyInfoOrParent0
- client.inference.embeddings(\*\*params) -> EmbeddingsResponse
# Embeddings
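
For reference, a minimal import sketch of the reshuffled top-level types surface described in the api.md hunks above (only names that appear in those hunks are used; ToolDef, TokenLogProbs and CompletionResponse are no longer exported from this namespace):

# Sketch grounded in the api.md diff above: SharedTokenLogProbs is newly
# exported at the top level alongside the existing tool-runtime types.
from llama_stack_client.types import (
    SharedTokenLogProbs,
    ToolInvocationResult,
    ToolRuntimeListToolsResponse,
)

print(SharedTokenLogProbs, ToolInvocationResult, ToolRuntimeListToolsResponse)
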
diff --git a/src/llama_stack_client/pagination.py b/src/llama_stack_client/pagination.py
index 9122ff46..67106bc5 100644
--- a/src/llama_stack_client/pagination.py
+++ b/src/llama_stack_client/pagination.py
@@ -24,10 +24,13 @@ def _get_page_items(self) -> List[_T]:
@override
def next_page_info(self) -> Optional[PageInfo]:
next_index = self.next_index
- if not next_index:
- return None
+ if next_index is None:
+ return None # type: ignore[unreachable]
+
+ length = len(self._get_page_items())
+ current_count = next_index + length
- return PageInfo(params={"start_index": next_index})
+ return PageInfo(params={"start_index": current_count})
class AsyncDatasetsIterrows(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
@@ -44,10 +47,13 @@ def _get_page_items(self) -> List[_T]:
@override
def next_page_info(self) -> Optional[PageInfo]:
next_index = self.next_index
- if not next_index:
- return None
+ if next_index is None:
+ return None # type: ignore[unreachable]
+
+ length = len(self._get_page_items())
+ current_count = next_index + length
- return PageInfo(params={"start_index": next_index})
+ return PageInfo(params={"start_index": current_count})
class SyncOpenAICursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
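
A minimal, self-contained sketch of the paging arithmetic introduced above (the class below is illustrative, not the SDK's): pagination now stops only when next_index is truly None, and the next request's start_index advances past the items already returned instead of reusing next_index verbatim.

from typing import List, Optional


class RowsPageSketch:
    """Illustrative stand-in for SyncDatasetsIterrows' paging fields."""

    def __init__(self, rows: List[dict], next_index: Optional[int]) -> None:
        self.rows = rows
        self.next_index = next_index

    def next_start_index(self) -> Optional[int]:
        # Mirrors the patched next_page_info: stop only when next_index is
        # None, otherwise request start_index = next_index + page length.
        if self.next_index is None:
            return None
        return self.next_index + len(self.rows)


page = RowsPageSketch(rows=[{"row": 1}, {"row": 2}], next_index=0)
assert page.next_start_index() == 2  # next page starts after the 2 rows seen
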
diff --git a/src/llama_stack_client/resources/inference.py b/src/llama_stack_client/resources/inference.py
index 7aec2dbd..a10919b5 100644
--- a/src/llama_stack_client/resources/inference.py
+++ b/src/llama_stack_client/resources/inference.py
@@ -27,10 +27,10 @@
)
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
-from ..types.completion_response import CompletionResponse
from ..types.embeddings_response import EmbeddingsResponse
from ..types.shared_params.message import Message
from ..types.shared.batch_completion import BatchCompletion
+from ..types.inference_completion_params import UnnamedTypeWithNoPropertyInfoOrParent0
from ..types.shared_params.response_format import ResponseFormat
from ..types.shared_params.sampling_params import SamplingParams
from ..types.shared.chat_completion_response import ChatCompletionResponse
@@ -467,7 +467,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0:
"""
Generate a completion for the given content using the specified model.
@@ -514,7 +514,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Stream[CompletionResponse]:
+ ) -> Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -561,7 +561,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | Stream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -608,7 +608,7 @@ def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | Stream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | Stream[UnnamedTypeWithNoPropertyInfoOrParent0]:
if stream:
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
return self._post(
@@ -629,9 +629,9 @@ def completion(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=CompletionResponse,
+ cast_to=UnnamedTypeWithNoPropertyInfoOrParent0,
stream=stream or False,
- stream_cls=Stream[CompletionResponse],
+ stream_cls=Stream[UnnamedTypeWithNoPropertyInfoOrParent0],
)
@typing_extensions.deprecated("/v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.")
@@ -1122,7 +1122,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0:
"""
Generate a completion for the given content using the specified model.
@@ -1169,7 +1169,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncStream[CompletionResponse]:
+ ) -> AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -1216,7 +1216,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | AsyncStream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
"""
Generate a completion for the given content using the specified model.
@@ -1263,7 +1263,7 @@ async def completion(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionResponse | AsyncStream[CompletionResponse]:
+ ) -> UnnamedTypeWithNoPropertyInfoOrParent0 | AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0]:
if stream:
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
return await self._post(
@@ -1284,9 +1284,9 @@ async def completion(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=CompletionResponse,
+ cast_to=UnnamedTypeWithNoPropertyInfoOrParent0,
stream=stream or False,
- stream_cls=AsyncStream[CompletionResponse],
+ stream_cls=AsyncStream[UnnamedTypeWithNoPropertyInfoOrParent0],
)
@typing_extensions.deprecated("/v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.")
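
A hedged usage sketch of the renamed return annotation (the base_url, content, and model_id below are placeholders, not values from this patch); the completion method itself is unchanged, only its declared result type moves from CompletionResponse to UnnamedTypeWithNoPropertyInfoOrParent0.

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder URL

# Non-streaming call: now annotated as UnnamedTypeWithNoPropertyInfoOrParent0.
result = client.inference.completion(
    content="Write a haiku about the sea.",
    model_id="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model id
)
print(result)

# Streaming call: stream=True returns Stream[UnnamedTypeWithNoPropertyInfoOrParent0].
for chunk in client.inference.completion(
    content="Write a haiku about the sea.",
    model_id="meta-llama/Llama-3.1-8B-Instruct",
    stream=True,
):
    print(chunk)
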
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 887f1706..06561221 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -27,6 +27,7 @@
ToolCallOrString as ToolCallOrString,
CompletionMessage as CompletionMessage,
InterleavedContent as InterleavedContent,
+ SharedTokenLogProbs as SharedTokenLogProbs,
ToolParamDefinition as ToolParamDefinition,
ToolResponseMessage as ToolResponseMessage,
QueryGeneratorConfig as QueryGeneratorConfig,
@@ -34,7 +35,6 @@
InterleavedContentItem as InterleavedContentItem,
)
from .shield import Shield as Shield
-from .tool_def import ToolDef as ToolDef
from .benchmark import Benchmark as Benchmark
from .route_info import RouteInfo as RouteInfo
from .scoring_fn import ScoringFn as ScoringFn
@@ -46,10 +46,8 @@
from .provider_info import ProviderInfo as ProviderInfo
from .tool_response import ToolResponse as ToolResponse
from .inference_step import InferenceStep as InferenceStep
-from .tool_def_param import ToolDefParam as ToolDefParam
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
-from .token_log_probs import TokenLogProbs as TokenLogProbs
from .file_list_params import FileListParams as FileListParams
from .shield_call_step import ShieldCallStep as ShieldCallStep
from .span_with_status import SpanWithStatus as SpanWithStatus
@@ -62,7 +60,6 @@
from .tool_list_response import ToolListResponse as ToolListResponse
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_list_response import AgentListResponse as AgentListResponse
-from .completion_response import CompletionResponse as CompletionResponse
from .embeddings_response import EmbeddingsResponse as EmbeddingsResponse
from .list_files_response import ListFilesResponse as ListFilesResponse
from .list_tools_response import ListToolsResponse as ListToolsResponse
diff --git a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
index 2b94eb18..3d4ac9a5 100644
--- a/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_response_stream_chunk.py
@@ -4,8 +4,8 @@
from typing_extensions import Literal
from .._models import BaseModel
-from .token_log_probs import TokenLogProbs
from .shared.content_delta import ContentDelta
+from .shared.shared_token_log_probs import SharedTokenLogProbs
__all__ = ["ChatCompletionResponseStreamChunk", "Event", "Metric"]
@@ -20,7 +20,7 @@ class Event(BaseModel):
event_type: Literal["start", "complete", "progress"]
"""Type of the event"""
- logprobs: Optional[List[TokenLogProbs]] = None
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
"""Optional log probabilities for generated tokens"""
stop_reason: Optional[Literal["end_of_turn", "end_of_message", "out_of_tokens"]] = None
diff --git a/src/llama_stack_client/types/completion_response.py b/src/llama_stack_client/types/completion_response.py
deleted file mode 100644
index 51772801..00000000
--- a/src/llama_stack_client/types/completion_response.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .token_log_probs import TokenLogProbs
-
-__all__ = ["CompletionResponse", "Metric"]
-
-
-class Metric(BaseModel):
- metric: str
- """The name of the metric"""
-
- value: float
- """The numeric value of the metric"""
-
- unit: Optional[str] = None
- """(Optional) The unit of measurement for the metric value"""
-
-
-class CompletionResponse(BaseModel):
- content: str
- """The generated completion text"""
-
- stop_reason: Literal["end_of_turn", "end_of_message", "out_of_tokens"]
- """Reason why generation stopped"""
-
- logprobs: Optional[List[TokenLogProbs]] = None
- """Optional log probabilities for generated tokens"""
-
- metrics: Optional[List[Metric]] = None
- """(Optional) List of metrics associated with the API response"""
diff --git a/src/llama_stack_client/types/shared/__init__.py b/src/llama_stack_client/types/shared/__init__.py
index 0fe46810..3a8b0c83 100644
--- a/src/llama_stack_client/types/shared/__init__.py
+++ b/src/llama_stack_client/types/shared/__init__.py
@@ -22,5 +22,6 @@
from .tool_param_definition import ToolParamDefinition as ToolParamDefinition
from .tool_response_message import ToolResponseMessage as ToolResponseMessage
from .query_generator_config import QueryGeneratorConfig as QueryGeneratorConfig
+from .shared_token_log_probs import SharedTokenLogProbs as SharedTokenLogProbs
from .chat_completion_response import ChatCompletionResponse as ChatCompletionResponse
from .interleaved_content_item import InterleavedContentItem as InterleavedContentItem
diff --git a/src/llama_stack_client/types/shared/agent_config.py b/src/llama_stack_client/types/shared/agent_config.py
index eb116159..ba18c52b 100644
--- a/src/llama_stack_client/types/shared/agent_config.py
+++ b/src/llama_stack_client/types/shared/agent_config.py
@@ -4,11 +4,48 @@
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
-from ..tool_def import ToolDef
from .response_format import ResponseFormat
from .sampling_params import SamplingParams
-__all__ = ["AgentConfig", "ToolConfig", "Toolgroup", "ToolgroupAgentToolGroupWithArgs"]
+__all__ = [
+ "AgentConfig",
+ "ClientTool",
+ "ClientToolParameter",
+ "ToolConfig",
+ "Toolgroup",
+ "ToolgroupAgentToolGroupWithArgs",
+]
+
+
+class ClientToolParameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ClientTool(BaseModel):
+ name: str
+ """Name of the tool"""
+
+ description: Optional[str] = None
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Optional[List[ClientToolParameter]] = None
+ """(Optional) List of parameters this tool accepts"""
class ToolConfig(BaseModel):
@@ -56,7 +93,7 @@ class AgentConfig(BaseModel):
model: str
"""The model identifier to use for the agent"""
- client_tools: Optional[List[ToolDef]] = None
+ client_tools: Optional[List[ClientTool]] = None
enable_session_persistence: Optional[bool] = None
"""Optional flag indicating whether session data has to be persisted"""
diff --git a/src/llama_stack_client/types/shared/batch_completion.py b/src/llama_stack_client/types/shared/batch_completion.py
index 43a0a735..450861b8 100644
--- a/src/llama_stack_client/types/shared/batch_completion.py
+++ b/src/llama_stack_client/types/shared/batch_completion.py
@@ -1,13 +1,39 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
+from typing import List, Optional
+from typing_extensions import Literal
from ..._models import BaseModel
-from ..completion_response import CompletionResponse
+from .shared_token_log_probs import SharedTokenLogProbs
-__all__ = ["BatchCompletion"]
+__all__ = ["BatchCompletion", "Batch", "BatchMetric"]
+
+
+class BatchMetric(BaseModel):
+ metric: str
+ """The name of the metric"""
+
+ value: float
+ """The numeric value of the metric"""
+
+ unit: Optional[str] = None
+ """(Optional) The unit of measurement for the metric value"""
+
+
+class Batch(BaseModel):
+ content: str
+ """The generated completion text"""
+
+ stop_reason: Literal["end_of_turn", "end_of_message", "out_of_tokens"]
+ """Reason why generation stopped"""
+
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
+ """Optional log probabilities for generated tokens"""
+
+ metrics: Optional[List[BatchMetric]] = None
+ """(Optional) List of metrics associated with the API response"""
class BatchCompletion(BaseModel):
- batch: List[CompletionResponse]
+ batch: List[Batch]
"""List of completion responses, one for each input in the batch"""
diff --git a/src/llama_stack_client/types/shared/chat_completion_response.py b/src/llama_stack_client/types/shared/chat_completion_response.py
index 3ff6e0bc..83519944 100644
--- a/src/llama_stack_client/types/shared/chat_completion_response.py
+++ b/src/llama_stack_client/types/shared/chat_completion_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from ..token_log_probs import TokenLogProbs
from .completion_message import CompletionMessage
+from .shared_token_log_probs import SharedTokenLogProbs
__all__ = ["ChatCompletionResponse", "Metric"]
@@ -24,7 +24,7 @@ class ChatCompletionResponse(BaseModel):
completion_message: CompletionMessage
"""The complete response message"""
- logprobs: Optional[List[TokenLogProbs]] = None
+ logprobs: Optional[List[SharedTokenLogProbs]] = None
"""Optional log probabilities for generated tokens"""
metrics: Optional[List[Metric]] = None
diff --git a/src/llama_stack_client/types/token_log_probs.py b/src/llama_stack_client/types/shared/shared_token_log_probs.py
similarity index 67%
rename from src/llama_stack_client/types/token_log_probs.py
rename to src/llama_stack_client/types/shared/shared_token_log_probs.py
index b1a0a2b4..904e69e3 100644
--- a/src/llama_stack_client/types/token_log_probs.py
+++ b/src/llama_stack_client/types/shared/shared_token_log_probs.py
@@ -2,11 +2,11 @@
from typing import Dict
-from .._models import BaseModel
+from ..._models import BaseModel
-__all__ = ["TokenLogProbs"]
+__all__ = ["SharedTokenLogProbs"]
-class TokenLogProbs(BaseModel):
+class SharedTokenLogProbs(BaseModel):
logprobs_by_token: Dict[str, float]
"""Dictionary mapping tokens to their log probabilities"""
diff --git a/src/llama_stack_client/types/shared_params/agent_config.py b/src/llama_stack_client/types/shared_params/agent_config.py
index 5cebec3f..676ec749 100644
--- a/src/llama_stack_client/types/shared_params/agent_config.py
+++ b/src/llama_stack_client/types/shared_params/agent_config.py
@@ -5,11 +5,48 @@
from typing import Dict, List, Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from ..tool_def_param import ToolDefParam
from .response_format import ResponseFormat
from .sampling_params import SamplingParams
-__all__ = ["AgentConfig", "ToolConfig", "Toolgroup", "ToolgroupAgentToolGroupWithArgs"]
+__all__ = [
+ "AgentConfig",
+ "ClientTool",
+ "ClientToolParameter",
+ "ToolConfig",
+ "Toolgroup",
+ "ToolgroupAgentToolGroupWithArgs",
+]
+
+
+class ClientToolParameter(TypedDict, total=False):
+ description: Required[str]
+ """Human-readable description of what the parameter does"""
+
+ name: Required[str]
+ """Name of the parameter"""
+
+ parameter_type: Required[str]
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: Required[bool]
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, Iterable[object], object, None]
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ClientTool(TypedDict, total=False):
+ name: Required[str]
+ """Name of the tool"""
+
+ description: str
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Iterable[ClientToolParameter]
+ """(Optional) List of parameters this tool accepts"""
class ToolConfig(TypedDict, total=False):
@@ -57,7 +94,7 @@ class AgentConfig(TypedDict, total=False):
model: Required[str]
"""The model identifier to use for the agent"""
- client_tools: Iterable[ToolDefParam]
+ client_tools: Iterable[ClientTool]
enable_session_persistence: bool
"""Optional flag indicating whether session data has to be persisted"""
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
deleted file mode 100644
index c82a9b8a..00000000
--- a/src/llama_stack_client/types/tool_def.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ToolDef", "Parameter"]
-
-
-class Parameter(BaseModel):
- description: str
- """Human-readable description of what the parameter does"""
-
- name: str
- """Name of the parameter"""
-
- parameter_type: str
- """Type of the parameter (e.g., string, integer)"""
-
- required: bool
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, List[object], object, None] = None
- """(Optional) Default value for the parameter if not provided"""
-
-
-class ToolDef(BaseModel):
- name: str
- """Name of the tool"""
-
- description: Optional[str] = None
- """(Optional) Human-readable description of what the tool does"""
-
- metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
- """(Optional) Additional metadata about the tool"""
-
- parameters: Optional[List[Parameter]] = None
- """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/tool_def_param.py b/src/llama_stack_client/types/tool_def_param.py
deleted file mode 100644
index 93ad8285..00000000
--- a/src/llama_stack_client/types/tool_def_param.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolDefParam", "Parameter"]
-
-
-class Parameter(TypedDict, total=False):
- description: Required[str]
- """Human-readable description of what the parameter does"""
-
- name: Required[str]
- """Name of the parameter"""
-
- parameter_type: Required[str]
- """Type of the parameter (e.g., string, integer)"""
-
- required: Required[bool]
- """Whether this parameter is required for tool invocation"""
-
- default: Union[bool, float, str, Iterable[object], object, None]
- """(Optional) Default value for the parameter if not provided"""
-
-
-class ToolDefParam(TypedDict, total=False):
- name: Required[str]
- """Name of the tool"""
-
- description: str
- """(Optional) Human-readable description of what the tool does"""
-
- metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]
- """(Optional) Additional metadata about the tool"""
-
- parameters: Iterable[Parameter]
- """(Optional) List of parameters this tool accepts"""
diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_response.py b/src/llama_stack_client/types/tool_runtime_list_tools_response.py
index cd65754f..9982d1d7 100644
--- a/src/llama_stack_client/types/tool_runtime_list_tools_response.py
+++ b/src/llama_stack_client/types/tool_runtime_list_tools_response.py
@@ -1,10 +1,46 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
+from typing import Dict, List, Union, Optional
from typing_extensions import TypeAlias
-from .tool_def import ToolDef
+from .._models import BaseModel
-__all__ = ["ToolRuntimeListToolsResponse"]
+__all__ = [
+ "ToolRuntimeListToolsResponse",
+ "ToolRuntimeListToolsResponseItem",
+ "ToolRuntimeListToolsResponseItemParameter",
+]
-ToolRuntimeListToolsResponse: TypeAlias = List[ToolDef]
+
+class ToolRuntimeListToolsResponseItemParameter(BaseModel):
+ description: str
+ """Human-readable description of what the parameter does"""
+
+ name: str
+ """Name of the parameter"""
+
+ parameter_type: str
+ """Type of the parameter (e.g., string, integer)"""
+
+ required: bool
+ """Whether this parameter is required for tool invocation"""
+
+ default: Union[bool, float, str, List[object], object, None] = None
+ """(Optional) Default value for the parameter if not provided"""
+
+
+class ToolRuntimeListToolsResponseItem(BaseModel):
+ name: str
+ """Name of the tool"""
+
+ description: Optional[str] = None
+ """(Optional) Human-readable description of what the tool does"""
+
+ metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
+ """(Optional) Additional metadata about the tool"""
+
+ parameters: Optional[List[ToolRuntimeListToolsResponseItemParameter]] = None
+ """(Optional) List of parameters this tool accepts"""
+
+
+ToolRuntimeListToolsResponse: TypeAlias = List[ToolRuntimeListToolsResponseItem]
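
With ToolDef removed, the list-tools response now aliases a list of inline items; a hedged sketch of walking it (the call shape and toolgroup id below are assumptions, not taken from this patch):

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder URL

# Assumed call shape for the tool runtime list endpoint.
tools = client.tool_runtime.list_tools(tool_group_id="builtin::websearch")
for tool in tools:
    # Fields below come from ToolRuntimeListToolsResponseItem(Parameter).
    required = [p.name for p in (tool.parameters or []) if p.required]
    print(tool.name, tool.description, required)
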
diff --git a/tests/api_resources/test_inference.py b/tests/api_resources/test_inference.py
index d5ef46d1..59557a5e 100644
--- a/tests/api_resources/test_inference.py
+++ b/tests/api_resources/test_inference.py
@@ -10,11 +10,11 @@
from tests.utils import assert_matches_type
from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
from llama_stack_client.types import (
- CompletionResponse,
EmbeddingsResponse,
InferenceBatchChatCompletionResponse,
)
from llama_stack_client.types.shared import BatchCompletion, ChatCompletionResponse
+from llama_stack_client.types.inference_completion_params import UnnamedTypeWithNoPropertyInfoOrParent0
# pyright: reportDeprecated=false
@@ -392,7 +392,7 @@ def test_method_completion_overload_1(self, client: LlamaStackClient) -> None:
model_id="model_id",
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_method_completion_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
@@ -414,7 +414,7 @@ def test_method_completion_with_all_params_overload_1(self, client: LlamaStackCl
stream=False,
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> None:
@@ -427,7 +427,7 @@ def test_raw_response_completion_overload_1(self, client: LlamaStackClient) -> N
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
def test_streaming_response_completion_overload_1(self, client: LlamaStackClient) -> None:
@@ -440,7 +440,7 @@ def test_streaming_response_completion_overload_1(self, client: LlamaStackClient
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -931,7 +931,7 @@ async def test_method_completion_overload_1(self, async_client: AsyncLlamaStackC
model_id="model_id",
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_method_completion_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -953,7 +953,7 @@ async def test_method_completion_with_all_params_overload_1(self, async_client:
stream=False,
)
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_raw_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -966,7 +966,7 @@ async def test_raw_response_completion_overload_1(self, async_client: AsyncLlama
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = await response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
@parametrize
async def test_streaming_response_completion_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
@@ -979,7 +979,7 @@ async def test_streaming_response_completion_overload_1(self, async_client: Asyn
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
inference = await response.parse()
- assert_matches_type(CompletionResponse, inference, path=["response"])
+ assert_matches_type(UnnamedTypeWithNoPropertyInfoOrParent0, inference, path=["response"])
assert cast(Any, response.is_closed) is True
From af6b97e6ec55473a03682ea45e4bac9429fbdf78 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 05:35:22 +0000
Subject: [PATCH 2/3] chore: update github action
---
.github/workflows/ci.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ee914c4e..2c0a0c04 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.repository == 'stainless-sdks/llama-stack-client-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
timeout-minutes: 10
name: build
permissions:
@@ -61,12 +61,14 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
+ if: github.repository == 'stainless-sdks/llama-stack-client-python'
id: github-oidc
uses: actions/github-script@v6
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
+ if: github.repository == 'stainless-sdks/llama-stack-client-python'
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
From a2023f2cb0f37fea0f5859a7a83d3e86040bdb92 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 17:20:02 +0000
Subject: [PATCH 3/3] release: 0.3.0-alpha.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 24 ++++++++++++++++++++++++
pyproject.toml | 2 +-
3 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index bce5b8a9..1ae25264 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.18-alpha.3"
+ ".": "0.3.0-alpha.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06c78082..ee19a6b4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 0.3.0-alpha.1 (2025-08-22)
+
+Full Changelog: [v0.2.18-alpha.3...v0.3.0-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.3...v0.3.0-alpha.1)
+
+### Features
+
+* **api:** manual updates ([119bdb2](https://github.com/llamastack/llama-stack-client-python/commit/119bdb2a862fe772ca82770937aba49ffb039bf2))
+
+
+### Bug Fixes
+
+* **agent:** fix wrong module import in ReAct agent ([#262](https://github.com/llamastack/llama-stack-client-python/issues/262)) ([c17f3d6](https://github.com/llamastack/llama-stack-client-python/commit/c17f3d65af17d282785623864661ef2d16fcb1fc)), closes [#261](https://github.com/llamastack/llama-stack-client-python/issues/261)
+* **build:** kill explicit listing of python3.13 for now ([5284b4a](https://github.com/llamastack/llama-stack-client-python/commit/5284b4a93822e8900c05f63ddf342aab3b603aa3))
+
+
+### Chores
+
+* update github action ([af6b97e](https://github.com/llamastack/llama-stack-client-python/commit/af6b97e6ec55473a03682ea45e4bac9429fbdf78))
+
+
+### Build System
+
+* Bump version to 0.2.18 ([53d95ba](https://github.com/llamastack/llama-stack-client-python/commit/53d95bad01e4aaa8fa27438618aaa6082cd60275))
+
## 0.2.18-alpha.3 (2025-08-14)
Full Changelog: [v0.2.18-alpha.2...v0.2.18-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.2.18-alpha.2...v0.2.18-alpha.3)
diff --git a/pyproject.toml b/pyproject.toml
index f0a1927f..55f1799e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.2.18"
+version = "0.3.0-alpha.1"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"