diff --git a/src/llama_stack_client/resources/agents/turn.py b/src/llama_stack_client/resources/agents/turn.py
index 272ea4d9..4ee4bda3 100644
--- a/src/llama_stack_client/resources/agents/turn.py
+++ b/src/llama_stack_client/resources/agents/turn.py
@@ -25,7 +25,6 @@
 from ..._base_client import make_request_options
 from ...types.agents import turn_create_params
 from ...types.agents.turn import Turn
-from ...types.agents.agent_turn_response_stream_chunk import AgentTurnResponseStreamChunk
 
 __all__ = ["TurnResource", "AsyncTurnResource"]
 
@@ -95,7 +94,7 @@ def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Stream[AgentTurnResponseStreamChunk]:
+    ) -> Stream[Turn]:
         """
         Args:
           extra_headers: Send extra headers
@@ -124,7 +123,7 @@ def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Turn | Stream[AgentTurnResponseStreamChunk]:
+    ) -> Turn | Stream[Turn]:
         """
         Args:
           extra_headers: Send extra headers
@@ -153,7 +152,7 @@ def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Turn | Stream[AgentTurnResponseStreamChunk]:
+    ) -> Turn | Stream[Turn]:
         if not agent_id:
             raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
         if not session_id:
@@ -174,7 +173,7 @@ def create(
             ),
             cast_to=Turn,
             stream=stream or False,
-            stream_cls=Stream[AgentTurnResponseStreamChunk],
+            stream_cls=Stream[Turn],
         )
 
     def retrieve(
@@ -280,7 +279,7 @@ async def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncStream[AgentTurnResponseStreamChunk]:
+    ) -> AsyncStream[Turn]:
         """
         Args:
           extra_headers: Send extra headers
@@ -309,7 +308,7 @@ async def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Turn | AsyncStream[AgentTurnResponseStreamChunk]:
+    ) -> Turn | AsyncStream[Turn]:
         """
         Args:
           extra_headers: Send extra headers
@@ -338,7 +337,7 @@ async def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Turn | AsyncStream[AgentTurnResponseStreamChunk]:
+    ) -> Turn | AsyncStream[Turn]:
         if not agent_id:
             raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}")
         if not session_id:
@@ -359,7 +358,7 @@ async def create(
             ),
             cast_to=Turn,
             stream=stream or False,
-            stream_cls=AsyncStream[AgentTurnResponseStreamChunk],
+            stream_cls=AsyncStream[Turn],
         )
 
     async def retrieve(
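
The caller-visible effect of this change is the element type of the streaming overloads: with stream=True, create() is now typed as yielding Turn objects instead of AgentTurnResponseStreamChunk. A minimal consumption sketch, assuming an already-configured client; the base URL, IDs, and message payload are placeholders, and parameter names other than agent_id/session_id may differ between SDK versions:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://127.0.0.1:4010")  # placeholder URL

    # After this change the streaming overload is typed Stream[Turn].
    stream = client.agents.turn.create(
        agent_id="agent-123",      # placeholder
        session_id="session-456",  # placeholder
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
    )
    for event in stream:
        print(event)  # each yielded item is now typed as Turn
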
diff --git a/src/llama_stack_client/resources/post_training/job.py b/src/llama_stack_client/resources/post_training/job.py
index e6b67120..28f9f66b 100644
--- a/src/llama_stack_client/resources/post_training/job.py
+++ b/src/llama_stack_client/resources/post_training/job.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Type, Optional, cast
+from typing import List, Type, Optional, cast
 
 import httpx
 
@@ -22,7 +22,7 @@
 from ..._wrappers import DataWrapper
 from ..._base_client import make_request_options
 from ...types.post_training import job_cancel_params, job_status_params, job_artifacts_params
-from ...types.post_training.job_list_response import JobListResponse
+from ...types.list_post_training_jobs_response import Data
 from ...types.post_training.job_status_response import JobStatusResponse
 from ...types.post_training.job_artifacts_response import JobArtifactsResponse
 
@@ -58,7 +58,7 @@ def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> JobListResponse:
+    ) -> List[Data]:
         return self._get(
             "/v1/post-training/jobs",
             options=make_request_options(
@@ -66,9 +66,9 @@
                 extra_query=extra_query,
                 extra_body=extra_body,
                 timeout=timeout,
-                post_parser=DataWrapper[JobListResponse]._unwrapper,
+                post_parser=DataWrapper[List[Data]]._unwrapper,
             ),
-            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
+            cast_to=cast(Type[List[Data]], DataWrapper[List[Data]]),
         )
 
     def artifacts(
@@ -198,7 +198,7 @@ async def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> JobListResponse:
+    ) -> List[Data]:
         return await self._get(
             "/v1/post-training/jobs",
             options=make_request_options(
@@ -206,9 +206,9 @@
                 extra_query=extra_query,
                 extra_body=extra_body,
                 timeout=timeout,
-                post_parser=DataWrapper[JobListResponse]._unwrapper,
+                post_parser=DataWrapper[List[Data]]._unwrapper,
             ),
-            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
+            cast_to=cast(Type[List[Data]], DataWrapper[List[Data]]),
         )
 
     async def artifacts(
diff --git a/src/llama_stack_client/types/agents/__init__.py b/src/llama_stack_client/types/agents/__init__.py
index be21f291..517ddb01 100644
--- a/src/llama_stack_client/types/agents/__init__.py
+++ b/src/llama_stack_client/types/agents/__init__.py
@@ -5,10 +5,7 @@
 from .turn import Turn as Turn
 from .session import Session as Session
 from .turn_create_params import TurnCreateParams as TurnCreateParams
-from .turn_response_event import TurnResponseEvent as TurnResponseEvent
 from .session_create_params import SessionCreateParams as SessionCreateParams
 from .step_retrieve_response import StepRetrieveResponse as StepRetrieveResponse
 from .session_create_response import SessionCreateResponse as SessionCreateResponse
 from .session_retrieve_params import SessionRetrieveParams as SessionRetrieveParams
-from .turn_response_event_payload import TurnResponseEventPayload as TurnResponseEventPayload
-from .agent_turn_response_stream_chunk import AgentTurnResponseStreamChunk as AgentTurnResponseStreamChunk
diff --git a/src/llama_stack_client/types/inference_step.py b/src/llama_stack_client/types/inference_step.py
index de049820..ba429fa3 100644
--- a/src/llama_stack_client/types/inference_step.py
+++ b/src/llama_stack_client/types/inference_step.py
@@ -13,7 +13,7 @@
 
 
 class InferenceStep(BaseModel):
-    inference_model_response: CompletionMessage = FieldInfo(alias="model_response")
+    api_model_response: CompletionMessage = FieldInfo(alias="model_response")
 
     step_id: str
 
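
The list() changes above lean on the SDK's data-envelope convention: the endpoint responds with {"data": [...]}, cast_to parses that envelope, and post_parser unwraps it so callers receive the bare list. A self-contained sketch of the mechanism; DataWrapper and Data here are illustrative stand-ins, not the SDK's actual _wrappers module or generated job model:

    from typing import Generic, List, TypeVar, cast

    from pydantic import BaseModel

    _T = TypeVar("_T")


    class DataWrapper(BaseModel, Generic[_T]):
        # Illustrative stand-in for llama_stack_client._wrappers.DataWrapper.
        data: _T

        @staticmethod
        def _unwrapper(obj: "DataWrapper[_T]") -> _T:
            return cast(_T, obj.data)


    class Data(BaseModel):
        # Stand-in for the generated post-training job model.
        job_uuid: str


    # cast_to parses the envelope, then post_parser strips it, which is why
    # list() can return List[Data] while the wire format stays {"data": [...]}.
    raw = {"data": [{"job_uuid": "job-1234"}]}
    wrapped = DataWrapper[List[Data]].model_validate(raw)
    jobs: List[Data] = DataWrapper._unwrapper(wrapped)
    print(jobs[0].job_uuid)  # -> job-1234
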
diff --git a/src/llama_stack_client/types/list_datasets_response.py b/src/llama_stack_client/types/list_datasets_response.py
index 3af9d927..635c9c88 100644
--- a/src/llama_stack_client/types/list_datasets_response.py
+++ b/src/llama_stack_client/types/list_datasets_response.py
@@ -1,30 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, List, Union
-from typing_extensions import Literal
-
 from .._models import BaseModel
-from .shared.url import URL
-from .shared.param_type import ParamType
+from .dataset_list_response import DatasetListResponse
 
-__all__ = ["ListDatasetsResponse", "Data"]
-
-
-class Data(BaseModel):
-    dataset_schema: Dict[str, ParamType]
-
-    identifier: str
-
-    metadata: Dict[str, Union[bool, float, str, List[object], object, None]]
-
-    provider_id: str
-
-    provider_resource_id: str
-
-    type: Literal["dataset"]
-
-    url: URL
+__all__ = ["ListDatasetsResponse"]
 
 
 class ListDatasetsResponse(BaseModel):
-    data: List[Data]
+    data: DatasetListResponse
diff --git a/src/llama_stack_client/types/list_eval_tasks_response.py b/src/llama_stack_client/types/list_eval_tasks_response.py
index 099b2a33..4037c0dc 100644
--- a/src/llama_stack_client/types/list_eval_tasks_response.py
+++ b/src/llama_stack_client/types/list_eval_tasks_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from .._models import BaseModel
-from .eval_task import EvalTask
+from .eval_task_list_response import EvalTaskListResponse
 
 __all__ = ["ListEvalTasksResponse"]
 
 
 class ListEvalTasksResponse(BaseModel):
-    data: List[EvalTask]
+    data: EvalTaskListResponse
diff --git a/src/llama_stack_client/types/list_models_response.py b/src/llama_stack_client/types/list_models_response.py
index de12337c..32dcc9d9 100644
--- a/src/llama_stack_client/types/list_models_response.py
+++ b/src/llama_stack_client/types/list_models_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
-from .model import Model
 from .._models import BaseModel
+from .model_list_response import ModelListResponse
 
 __all__ = ["ListModelsResponse"]
 
 
 class ListModelsResponse(BaseModel):
-    data: List[Model]
+    data: ModelListResponse
diff --git a/src/llama_stack_client/types/list_providers_response.py b/src/llama_stack_client/types/list_providers_response.py
index 11fbd2b7..cbe69e3b 100644
--- a/src/llama_stack_client/types/list_providers_response.py
+++ b/src/llama_stack_client/types/list_providers_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from .._models import BaseModel
-from .provider_info import ProviderInfo
+from .provider_list_response import ProviderListResponse
 
 __all__ = ["ListProvidersResponse"]
 
 
 class ListProvidersResponse(BaseModel):
-    data: List[ProviderInfo]
+    data: ProviderListResponse
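
Each list_*_response model above now delegates its data field to a companion *_list_response module. Presumably those modules export a plain type alias over the existing item model; a sketch of the expected pattern for the models case, with stand-in types (the alias shape is an assumption, not copied from the generated file):

    from typing import List

    from pydantic import BaseModel
    from typing_extensions import TypeAlias


    class Model(BaseModel):
        # Stand-in for llama_stack_client.types.model.Model.
        identifier: str


    # What model_list_response.py presumably exports:
    ModelListResponse: TypeAlias = List[Model]


    class ListModelsResponse(BaseModel):
        data: ModelListResponse


    resp = ListModelsResponse.model_validate({"data": [{"identifier": "llama-3-8b"}]})
    print([m.identifier for m in resp.data])  # -> ['llama-3-8b']
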
diff --git a/src/llama_stack_client/types/list_routes_response.py b/src/llama_stack_client/types/list_routes_response.py
index 96d7a402..02cbd1e3 100644
--- a/src/llama_stack_client/types/list_routes_response.py
+++ b/src/llama_stack_client/types/list_routes_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from .._models import BaseModel
-from .route_info import RouteInfo
+from .route_list_response import RouteListResponse
 
 __all__ = ["ListRoutesResponse"]
 
 
 class ListRoutesResponse(BaseModel):
-    data: List[RouteInfo]
+    data: RouteListResponse
diff --git a/src/llama_stack_client/types/list_scoring_functions_response.py b/src/llama_stack_client/types/list_scoring_functions_response.py
index 2a26e373..845c37be 100644
--- a/src/llama_stack_client/types/list_scoring_functions_response.py
+++ b/src/llama_stack_client/types/list_scoring_functions_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from .._models import BaseModel
-from .scoring_fn import ScoringFn
+from .scoring_function_list_response import ScoringFunctionListResponse
 
 __all__ = ["ListScoringFunctionsResponse"]
 
 
 class ListScoringFunctionsResponse(BaseModel):
-    data: List[ScoringFn]
+    data: ScoringFunctionListResponse
diff --git a/src/llama_stack_client/types/list_shields_response.py b/src/llama_stack_client/types/list_shields_response.py
index 6e445eec..35d1650d 100644
--- a/src/llama_stack_client/types/list_shields_response.py
+++ b/src/llama_stack_client/types/list_shields_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
-from .shield import Shield
 from .._models import BaseModel
+from .shield_list_response import ShieldListResponse
 
 __all__ = ["ListShieldsResponse"]
 
 
 class ListShieldsResponse(BaseModel):
-    data: List[Shield]
+    data: ShieldListResponse
diff --git a/src/llama_stack_client/types/list_tool_groups_response.py b/src/llama_stack_client/types/list_tool_groups_response.py
index 6640ca20..fec39d2f 100644
--- a/src/llama_stack_client/types/list_tool_groups_response.py
+++ b/src/llama_stack_client/types/list_tool_groups_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
 from .._models import BaseModel
-from .tool_group import ToolGroup
+from .toolgroup_list_response import ToolgroupListResponse
 
 __all__ = ["ListToolGroupsResponse"]
 
 
 class ListToolGroupsResponse(BaseModel):
-    data: List[ToolGroup]
+    data: ToolgroupListResponse
diff --git a/src/llama_stack_client/types/list_tools_response.py b/src/llama_stack_client/types/list_tools_response.py
index 02d2e739..02013c4f 100644
--- a/src/llama_stack_client/types/list_tools_response.py
+++ b/src/llama_stack_client/types/list_tools_response.py
@@ -1,12 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-
-from .tool import Tool
 from .._models import BaseModel
+from .tool_list_response import ToolListResponse
 
 __all__ = ["ListToolsResponse"]
 
 
 class ListToolsResponse(BaseModel):
-    data: List[Tool]
+    data: ToolListResponse
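
Because such an alias still resolves to a plain list at runtime, call sites that iterate or index .data should be unaffected by these renames; a quick compatibility check in the same stand-in style (Tool and the alias shape are assumptions, as above):

    from typing import List

    from pydantic import BaseModel
    from typing_extensions import TypeAlias


    class Tool(BaseModel):
        # Stand-in for llama_stack_client.types.tool.Tool.
        identifier: str


    ToolListResponse: TypeAlias = List[Tool]  # presumed shape of tool_list_response


    class ListToolsResponse(BaseModel):
        data: ToolListResponse


    resp = ListToolsResponse.model_validate({"data": [{"identifier": "web_search"}]})
    assert isinstance(resp.data, list)  # a TypeAlias introduces no wrapper class
    assert all(isinstance(t, Tool) for t in resp.data)
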
diff --git a/src/llama_stack_client/types/list_vector_dbs_response.py b/src/llama_stack_client/types/list_vector_dbs_response.py
index 1b293a6a..7d64c3d6 100644
--- a/src/llama_stack_client/types/list_vector_dbs_response.py
+++ b/src/llama_stack_client/types/list_vector_dbs_response.py
@@ -1,26 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
-from typing_extensions import Literal
-
 from .._models import BaseModel
+from .vector_db_list_response import VectorDBListResponse
 
-__all__ = ["ListVectorDBsResponse", "Data"]
-
-
-class Data(BaseModel):
-    embedding_dimension: int
-
-    embedding_model: str
-
-    identifier: str
-
-    provider_id: str
-
-    provider_resource_id: str
-
-    type: Literal["vector_db"]
+__all__ = ["ListVectorDBsResponse"]
 
 
 class ListVectorDBsResponse(BaseModel):
-    data: List[Data]
+    data: VectorDBListResponse
diff --git a/src/llama_stack_client/types/query_spans_response.py b/src/llama_stack_client/types/query_spans_response.py
index b7a9048e..5c54e623 100644
--- a/src/llama_stack_client/types/query_spans_response.py
+++ b/src/llama_stack_client/types/query_spans_response.py
@@ -1,28 +1,11 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-
 from .._models import BaseModel
+from .telemetry_query_spans_response import TelemetryQuerySpansResponse
 
-__all__ = ["QuerySpansResponse", "Data"]
-
-
-class Data(BaseModel):
-    name: str
-
-    span_id: str
-
-    start_time: datetime
-
-    trace_id: str
-
-    attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
-
-    end_time: Optional[datetime] = None
-
-    parent_span_id: Optional[str] = None
+__all__ = ["QuerySpansResponse"]
 
 
 class QuerySpansResponse(BaseModel):
-    data: List[Data]
+    data: TelemetryQuerySpansResponse
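
The inline Data models deleted above are relocated to the shared vector_db_list_response and telemetry_query_spans_response modules rather than removed outright. For hand-checking telemetry payloads, here is a stand-in that mirrors the deleted span model field-for-field (only the class name is invented; the fields are copied from the removed code):

    from datetime import datetime
    from typing import Dict, List, Optional, Union

    from pydantic import BaseModel


    class SpanData(BaseModel):
        # Mirrors the removed QuerySpansResponse.Data fields.
        name: str
        span_id: str
        start_time: datetime
        trace_id: str
        attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None
        end_time: Optional[datetime] = None
        parent_span_id: Optional[str] = None


    span = SpanData.model_validate(
        {
            "name": "inference",
            "span_id": "a1b2",
            "start_time": "2024-01-01T00:00:00Z",
            "trace_id": "c3d4",
        }
    )
    print(span.start_time.isoformat())
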
diff --git a/tests/api_resources/post_training/test_job.py b/tests/api_resources/post_training/test_job.py
index 2ddb2f30..c38838d7 100644
--- a/tests/api_resources/post_training/test_job.py
+++ b/tests/api_resources/post_training/test_job.py
@@ -3,17 +3,17 @@
 from __future__ import annotations
 
 import os
-from typing import Any, Optional, cast
+from typing import Any, List, Optional, cast
 
 import pytest
 
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
 from llama_stack_client.types.post_training import (
-    JobListResponse,
     JobStatusResponse,
     JobArtifactsResponse,
 )
+from llama_stack_client.types.list_post_training_jobs_response import Data
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -24,7 +24,7 @@ class TestJob:
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         job = client.post_training.job.list()
-        assert_matches_type(JobListResponse, job, path=["response"])
+        assert_matches_type(List[Data], job, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
@@ -33,7 +33,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         job = response.parse()
-        assert_matches_type(JobListResponse, job, path=["response"])
+        assert_matches_type(List[Data], job, path=["response"])
 
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
@@ -42,7 +42,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
 
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
             job = response.parse()
-            assert_matches_type(JobListResponse, job, path=["response"])
+            assert_matches_type(List[Data], job, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
@@ -146,7 +146,7 @@ class TestAsyncJob:
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         job = await async_client.post_training.job.list()
-        assert_matches_type(JobListResponse, job, path=["response"])
+        assert_matches_type(List[Data], job, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
@@ -155,7 +155,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         job = await response.parse()
-        assert_matches_type(JobListResponse, job, path=["response"])
+        assert_matches_type(List[Data], job, path=["response"])
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
@@ -164,7 +164,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
 
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
             job = await response.parse()
-            assert_matches_type(JobListResponse, job, path=["response"])
+            assert_matches_type(List[Data], job, path=["response"])
 
         assert cast(Any, response.is_closed) is True
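
For readers unfamiliar with the test helper: assert_matches_type verifies a parsed value against an expected type annotation, which is why the expectation can change from a model class to List[Data] without restructuring the tests. A drastically simplified stand-in for the list case only (the real helper in tests/utils handles unions, optionals, and nested models as well):

    from typing import Any, List, get_args, get_origin


    def assert_matches_type_simplified(expected: Any, value: Any, path: List[str]) -> None:
        # Handles only the List[Data] shape exercised above.
        if get_origin(expected) is list:
            assert isinstance(value, list), f"{'.'.join(path)}: expected a list"
            (item_type,) = get_args(expected)
            for item in value:
                assert isinstance(item, item_type), f"{'.'.join(path)}: unexpected element type"
        else:
            assert isinstance(value, expected), f"{'.'.join(path)}: unexpected type"
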