diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index 76166b33..429826ad 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -1,62 +1,60 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
 from __future__ import annotations

 import json
 import os
-from typing import Any, Mapping, Union
+from typing import Any, Union, Mapping
+from typing_extensions import Self, override

 import httpx
-from typing_extensions import Self, override

 from . import _exceptions
-from ._base_client import (
-    DEFAULT_MAX_RETRIES,
-    AsyncAPIClient,
-    SyncAPIClient,
-)
-from ._exceptions import APIStatusError
 from ._qs import Querystring
-from ._streaming import AsyncStream as AsyncStream
-from ._streaming import Stream as Stream
 from ._types import (
     NOT_GIVEN,
-    NotGiven,
     Omit,
-    ProxiesTypes,
-    RequestOptions,
     Timeout,
+    NotGiven,
     Transport,
+    ProxiesTypes,
+    RequestOptions,
 )
 from ._utils import (
-    get_async_library,
     is_given,
+    get_async_library,
 )
 from ._version import __version__
 from .resources import (
-    batch_inference,
-    datasetio,
-    datasets,
-    eval_tasks,
-    inference,
-    inspect,
+    tools,
     memory,
-    memory_banks,
     models,
-    providers,
     routes,
     safety,
+    inspect,
     scoring,
-    scoring_functions,
     shields,
-    synthetic_data_generation,
+    datasets,
+    datasetio,
+    inference,
+    providers,
     telemetry,
-    tool_runtime,
+    eval_tasks,
     toolgroups,
-    tools,
+    memory_banks,
+    tool_runtime,
+    batch_inference,
+    scoring_functions,
+    synthetic_data_generation,
+)
+from ._streaming import Stream as Stream, AsyncStream as AsyncStream
+from ._exceptions import APIStatusError
+from ._base_client import (
+    DEFAULT_MAX_RETRIES,
+    SyncAPIClient,
+    AsyncAPIClient,
 )
-from .resources.agents import agents
 from .resources.eval import eval
+from .resources.agents import agents
 from .resources.post_training import post_training

 __all__ = [
@@ -127,7 +125,7 @@ def __init__(
         if base_url is None:
             base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL")
         if base_url is None:
-            base_url = "http://any-hosted-llama-stack.com"
+            base_url = f"http://any-hosted-llama-stack.com"

         if provider_data is not None:
             if default_headers is None:
@@ -162,9 +160,7 @@ def __init__(
         self.routes = routes.RoutesResource(self)
         self.safety = safety.SafetyResource(self)
         self.shields = shields.ShieldsResource(self)
-        self.synthetic_data_generation = (
-            synthetic_data_generation.SyntheticDataGenerationResource(self)
-        )
+        self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResource(self)
         self.telemetry = telemetry.TelemetryResource(self)
         self.datasetio = datasetio.DatasetioResource(self)
         self.scoring = scoring.ScoringResource(self)
@@ -204,14 +200,10 @@ def copy(
         Create a new client instance re-using the same options given to the current
         client with optional overriding.
         """
         if default_headers is not None and set_default_headers is not None:
-            raise ValueError(
-                "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
-            )
+            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
         if default_query is not None and set_default_query is not None:
-            raise ValueError(
-                "The `default_query` and `set_default_query` arguments are mutually exclusive"
-            )
+            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

         headers = self._custom_headers
         if default_headers is not None:
@@ -252,14 +244,10 @@ def _make_status_error(
             return _exceptions.BadRequestError(err_msg, response=response, body=body)

         if response.status_code == 401:
-            return _exceptions.AuthenticationError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.AuthenticationError(err_msg, response=response, body=body)

         if response.status_code == 403:
-            return _exceptions.PermissionDeniedError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)

         if response.status_code == 404:
             return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -268,17 +256,13 @@ def _make_status_error(
             return _exceptions.ConflictError(err_msg, response=response, body=body)

         if response.status_code == 422:
-            return _exceptions.UnprocessableEntityError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)

         if response.status_code == 429:
             return _exceptions.RateLimitError(err_msg, response=response, body=body)

         if response.status_code >= 500:
-            return _exceptions.InternalServerError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.InternalServerError(err_msg, response=response, body=body)

         return APIStatusError(err_msg, response=response, body=body)

@@ -300,9 +284,7 @@ class AsyncLlamaStackClient(AsyncAPIClient):
     routes: routes.AsyncRoutesResource
     safety: safety.AsyncSafetyResource
     shields: shields.AsyncShieldsResource
-    synthetic_data_generation: (
-        synthetic_data_generation.AsyncSyntheticDataGenerationResource
-    )
+    synthetic_data_generation: synthetic_data_generation.AsyncSyntheticDataGenerationResource
     telemetry: telemetry.AsyncTelemetryResource
     datasetio: datasetio.AsyncDatasetioResource
     scoring: scoring.AsyncScoringResource
@@ -340,7 +322,7 @@ def __init__(
         if base_url is None:
             base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL")
         if base_url is None:
-            base_url = "http://any-hosted-llama-stack.com"
+            base_url = f"http://any-hosted-llama-stack.com"

         if provider_data is not None:
             if default_headers is None:
@@ -375,9 +357,7 @@ def __init__(
         self.routes = routes.AsyncRoutesResource(self)
         self.safety = safety.AsyncSafetyResource(self)
         self.shields = shields.AsyncShieldsResource(self)
-        self.synthetic_data_generation = (
-            synthetic_data_generation.AsyncSyntheticDataGenerationResource(self)
-        )
+        self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResource(self)
        self.telemetry = telemetry.AsyncTelemetryResource(self)
         self.datasetio = datasetio.AsyncDatasetioResource(self)
         self.scoring = scoring.AsyncScoringResource(self)
@@ -417,14 +397,10 @@ def copy(
         Create a new client instance re-using the same options given to the current
         client with optional overriding.
         """
         if default_headers is not None and set_default_headers is not None:
-            raise ValueError(
-                "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
-            )
+            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
         if default_query is not None and set_default_query is not None:
-            raise ValueError(
-                "The `default_query` and `set_default_query` arguments are mutually exclusive"
-            )
+            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

         headers = self._custom_headers
         if default_headers is not None:
@@ -465,14 +441,10 @@ def _make_status_error(
             return _exceptions.BadRequestError(err_msg, response=response, body=body)

         if response.status_code == 401:
-            return _exceptions.AuthenticationError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.AuthenticationError(err_msg, response=response, body=body)

         if response.status_code == 403:
-            return _exceptions.PermissionDeniedError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)

         if response.status_code == 404:
             return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -481,232 +453,138 @@ def _make_status_error(
             return _exceptions.ConflictError(err_msg, response=response, body=body)

         if response.status_code == 422:
-            return _exceptions.UnprocessableEntityError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)

         if response.status_code == 429:
             return _exceptions.RateLimitError(err_msg, response=response, body=body)

         if response.status_code >= 500:
-            return _exceptions.InternalServerError(
-                err_msg, response=response, body=body
-            )
+            return _exceptions.InternalServerError(err_msg, response=response, body=body)

         return APIStatusError(err_msg, response=response, body=body)


 class LlamaStackClientWithRawResponse:
     def __init__(self, client: LlamaStackClient) -> None:
-        self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse(
-            client.toolgroups
-        )
+        self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse(client.toolgroups)
         self.tools = tools.ToolsResourceWithRawResponse(client.tools)
-        self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse(
-            client.tool_runtime
-        )
+        self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse(client.tool_runtime)
         self.agents = agents.AgentsResourceWithRawResponse(client.agents)
-        self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse(
-            client.batch_inference
-        )
+        self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse(client.batch_inference)
         self.datasets = datasets.DatasetsResourceWithRawResponse(client.datasets)
         self.eval = eval.EvalResourceWithRawResponse(client.eval)
         self.inspect = inspect.InspectResourceWithRawResponse(client.inspect)
         self.inference = inference.InferenceResourceWithRawResponse(client.inference)
         self.memory = memory.MemoryResourceWithRawResponse(client.memory)
-        self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse(
-            client.memory_banks
-        )
+        self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse(client.memory_banks)
         self.models = models.ModelsResourceWithRawResponse(client.models)
-        self.post_training = post_training.PostTrainingResourceWithRawResponse(
-            client.post_training
-        )
+        self.post_training = post_training.PostTrainingResourceWithRawResponse(client.post_training)
         self.providers = providers.ProvidersResourceWithRawResponse(client.providers)
         self.routes = routes.RoutesResourceWithRawResponse(client.routes)
         self.safety = safety.SafetyResourceWithRawResponse(client.safety)
         self.shields = shields.ShieldsResourceWithRawResponse(client.shields)
-        self.synthetic_data_generation = (
-            synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse(
-                client.synthetic_data_generation
-            )
+        self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse(
+            client.synthetic_data_generation
         )
         self.telemetry = telemetry.TelemetryResourceWithRawResponse(client.telemetry)
         self.datasetio = datasetio.DatasetioResourceWithRawResponse(client.datasetio)
         self.scoring = scoring.ScoringResourceWithRawResponse(client.scoring)
-        self.scoring_functions = (
-            scoring_functions.ScoringFunctionsResourceWithRawResponse(
-                client.scoring_functions
-            )
-        )
+        self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithRawResponse(client.scoring_functions)
         self.eval_tasks = eval_tasks.EvalTasksResourceWithRawResponse(client.eval_tasks)


 class AsyncLlamaStackClientWithRawResponse:
     def __init__(self, client: AsyncLlamaStackClient) -> None:
-        self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse(
-            client.toolgroups
-        )
+        self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse(client.toolgroups)
         self.tools = tools.AsyncToolsResourceWithRawResponse(client.tools)
-        self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse(
-            client.tool_runtime
-        )
+        self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse(client.tool_runtime)
         self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents)
-        self.batch_inference = (
-            batch_inference.AsyncBatchInferenceResourceWithRawResponse(
-                client.batch_inference
-            )
-        )
+        self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithRawResponse(client.batch_inference)
         self.datasets = datasets.AsyncDatasetsResourceWithRawResponse(client.datasets)
         self.eval = eval.AsyncEvalResourceWithRawResponse(client.eval)
         self.inspect = inspect.AsyncInspectResourceWithRawResponse(client.inspect)
-        self.inference = inference.AsyncInferenceResourceWithRawResponse(
-            client.inference
-        )
+        self.inference = inference.AsyncInferenceResourceWithRawResponse(client.inference)
         self.memory = memory.AsyncMemoryResourceWithRawResponse(client.memory)
-        self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse(
-            client.memory_banks
-        )
+        self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse(client.memory_banks)
         self.models = models.AsyncModelsResourceWithRawResponse(client.models)
-        self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(
-            client.post_training
-        )
-        self.providers = providers.AsyncProvidersResourceWithRawResponse(
-            client.providers
-        )
+        self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(client.post_training)
+        self.providers = providers.AsyncProvidersResourceWithRawResponse(client.providers)
         self.routes = routes.AsyncRoutesResourceWithRawResponse(client.routes)
         self.safety = safety.AsyncSafetyResourceWithRawResponse(client.safety)
         self.shields = shields.AsyncShieldsResourceWithRawResponse(client.shields)
         self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithRawResponse(
             client.synthetic_data_generation
         )
-        self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse(
-            client.telemetry
-        )
-        self.datasetio = datasetio.AsyncDatasetioResourceWithRawResponse(
-            client.datasetio
-        )
+        self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse(client.telemetry)
+        self.datasetio = datasetio.AsyncDatasetioResourceWithRawResponse(client.datasetio)
         self.scoring = scoring.AsyncScoringResourceWithRawResponse(client.scoring)
-        self.scoring_functions = (
-            scoring_functions.AsyncScoringFunctionsResourceWithRawResponse(
-                client.scoring_functions
-            )
-        )
-        self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse(
-            client.eval_tasks
+        self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithRawResponse(
+            client.scoring_functions
         )
+        self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse(client.eval_tasks)


 class LlamaStackClientWithStreamedResponse:
     def __init__(self, client: LlamaStackClient) -> None:
-        self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse(
-            client.toolgroups
-        )
+        self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse(client.toolgroups)
         self.tools = tools.ToolsResourceWithStreamingResponse(client.tools)
-        self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse(
-            client.tool_runtime
-        )
+        self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse(client.tool_runtime)
         self.agents = agents.AgentsResourceWithStreamingResponse(client.agents)
-        self.batch_inference = (
-            batch_inference.BatchInferenceResourceWithStreamingResponse(
-                client.batch_inference
-            )
-        )
+        self.batch_inference = batch_inference.BatchInferenceResourceWithStreamingResponse(client.batch_inference)
         self.datasets = datasets.DatasetsResourceWithStreamingResponse(client.datasets)
         self.eval = eval.EvalResourceWithStreamingResponse(client.eval)
         self.inspect = inspect.InspectResourceWithStreamingResponse(client.inspect)
-        self.inference = inference.InferenceResourceWithStreamingResponse(
-            client.inference
-        )
+        self.inference = inference.InferenceResourceWithStreamingResponse(client.inference)
         self.memory = memory.MemoryResourceWithStreamingResponse(client.memory)
-        self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse(
-            client.memory_banks
-        )
+        self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse(client.memory_banks)
         self.models = models.ModelsResourceWithStreamingResponse(client.models)
-        self.post_training = post_training.PostTrainingResourceWithStreamingResponse(
-            client.post_training
-        )
-        self.providers = providers.ProvidersResourceWithStreamingResponse(
-            client.providers
-        )
+        self.post_training = post_training.PostTrainingResourceWithStreamingResponse(client.post_training)
+        self.providers = providers.ProvidersResourceWithStreamingResponse(client.providers)
         self.routes = routes.RoutesResourceWithStreamingResponse(client.routes)
         self.safety = safety.SafetyResourceWithStreamingResponse(client.safety)
         self.shields = shields.ShieldsResourceWithStreamingResponse(client.shields)
         self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithStreamingResponse(
             client.synthetic_data_generation
         )
-        self.telemetry = telemetry.TelemetryResourceWithStreamingResponse(
-            client.telemetry
-        )
-        self.datasetio = datasetio.DatasetioResourceWithStreamingResponse(
-            client.datasetio
-        )
+        self.telemetry = telemetry.TelemetryResourceWithStreamingResponse(client.telemetry)
+        self.datasetio = datasetio.DatasetioResourceWithStreamingResponse(client.datasetio)
         self.scoring = scoring.ScoringResourceWithStreamingResponse(client.scoring)
-        self.scoring_functions = (
-            scoring_functions.ScoringFunctionsResourceWithStreamingResponse(
-                client.scoring_functions
-            )
-        )
-        self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse(
-            client.eval_tasks
+        self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithStreamingResponse(
+            client.scoring_functions
         )
+        self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse(client.eval_tasks)


 class AsyncLlamaStackClientWithStreamedResponse:
     def __init__(self, client: AsyncLlamaStackClient) -> None:
-        self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse(
-            client.toolgroups
-        )
+        self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse(client.toolgroups)
         self.tools = tools.AsyncToolsResourceWithStreamingResponse(client.tools)
-        self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse(
-            client.tool_runtime
-        )
+        self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse(client.tool_runtime)
         self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents)
-        self.batch_inference = (
-            batch_inference.AsyncBatchInferenceResourceWithStreamingResponse(
-                client.batch_inference
-            )
-        )
-        self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(
-            client.datasets
-        )
+        self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithStreamingResponse(client.batch_inference)
+        self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(client.datasets)
         self.eval = eval.AsyncEvalResourceWithStreamingResponse(client.eval)
         self.inspect = inspect.AsyncInspectResourceWithStreamingResponse(client.inspect)
-        self.inference = inference.AsyncInferenceResourceWithStreamingResponse(
-            client.inference
-        )
+        self.inference = inference.AsyncInferenceResourceWithStreamingResponse(client.inference)
         self.memory = memory.AsyncMemoryResourceWithStreamingResponse(client.memory)
-        self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse(
-            client.memory_banks
-        )
+        self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse(client.memory_banks)
         self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
-        self.post_training = (
-            post_training.AsyncPostTrainingResourceWithStreamingResponse(
-                client.post_training
-            )
-        )
-        self.providers = providers.AsyncProvidersResourceWithStreamingResponse(
-            client.providers
-        )
+        self.post_training = post_training.AsyncPostTrainingResourceWithStreamingResponse(client.post_training)
+        self.providers = providers.AsyncProvidersResourceWithStreamingResponse(client.providers)
         self.routes = routes.AsyncRoutesResourceWithStreamingResponse(client.routes)
         self.safety = safety.AsyncSafetyResourceWithStreamingResponse(client.safety)
         self.shields = shields.AsyncShieldsResourceWithStreamingResponse(client.shields)
-        self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse(
-            client.synthetic_data_generation
-        )
-        self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse(
-            client.telemetry
-        )
-        self.datasetio = datasetio.AsyncDatasetioResourceWithStreamingResponse(
-            client.datasetio
-        )
-        self.scoring = scoring.AsyncScoringResourceWithStreamingResponse(client.scoring)
-        self.scoring_functions = (
-            scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse(
-                client.scoring_functions
+        self.synthetic_data_generation = (
+            synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse(
+                client.synthetic_data_generation
             )
         )
-        self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse(
-            client.eval_tasks
+        self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse(client.telemetry)
+        self.datasetio = datasetio.AsyncDatasetioResourceWithStreamingResponse(client.datasetio)
+        self.scoring = scoring.AsyncScoringResourceWithStreamingResponse(client.scoring)
+        self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse(
+            client.scoring_functions
         )
+        self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse(client.eval_tasks)


 Client = LlamaStackClient
diff --git a/src/llama_stack_client/_wrappers.py b/src/llama_stack_client/_wrappers.py
new file mode 100644
index 00000000..471b39dd
--- /dev/null
+++ b/src/llama_stack_client/_wrappers.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Generic, TypeVar
+
+from ._models import GenericModel
+
+__all__ = ["DataWrapper"]
+
+_T = TypeVar("_T")
+
+
+class DataWrapper(GenericModel, Generic[_T]):
+    data: _T
+
+    @staticmethod
+    def _unwrapper(obj: "DataWrapper[_T]") -> _T:
+        return obj.data
diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py
index 4e9efe5d..edb556fc 100644
--- a/src/llama_stack_client/resources/datasets.py
+++ b/src/llama_stack_client/resources/datasets.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Dict, Union, Iterable, Optional
+from typing import Dict, Type, Union, Iterable, Optional, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.shared_params.url import URL
 from ..types.dataset_list_response import DatasetListResponse
@@ -126,9 +127,13 @@ def list(
         return self._get(
             "/v1/datasets",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[DatasetListResponse]._unwrapper,
             ),
-            cast_to=DatasetListResponse,
+            cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
         )

     def register(
@@ -328,9 +333,13 @@ async def list(
         return await self._get(
             "/v1/datasets",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[DatasetListResponse]._unwrapper,
             ),
-            cast_to=DatasetListResponse,
+            cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/eval_tasks.py b/src/llama_stack_client/resources/eval_tasks.py
index f869f804..2f7bc089 100644
--- a/src/llama_stack_client/resources/eval_tasks.py
+++ b/src/llama_stack_client/resources/eval_tasks.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, List, Type, Union, Iterable, Optional, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.eval_task import EvalTask
 from ..types.eval_task_list_response import EvalTaskListResponse
@@ -124,9 +125,13 @@ def list(
         return self._get(
             "/v1/eval-tasks",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[EvalTaskListResponse]._unwrapper,
             ),
-            cast_to=EvalTaskListResponse,
+            cast_to=cast(Type[EvalTaskListResponse], DataWrapper[EvalTaskListResponse]),
         )

     def register(
@@ -283,9 +288,13 @@ async def list(
         return await self._get(
             "/v1/eval-tasks",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[EvalTaskListResponse]._unwrapper,
             ),
-            cast_to=EvalTaskListResponse,
+            cast_to=cast(Type[EvalTaskListResponse], DataWrapper[EvalTaskListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/memory_banks.py b/src/llama_stack_client/resources/memory_banks.py
index 1b4a393a..1d2fb197 100644
--- a/src/llama_stack_client/resources/memory_banks.py
+++ b/src/llama_stack_client/resources/memory_banks.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Any, Optional, cast
+from typing import Any, Type, Optional, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.memory_bank_list_response import MemoryBankListResponse
 from ..types.memory_bank_register_response import MemoryBankRegisterResponse
@@ -130,9 +131,13 @@ def list(
         return self._get(
             "/v1/memory-banks",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[MemoryBankListResponse]._unwrapper,
             ),
-            cast_to=MemoryBankListResponse,
+            cast_to=cast(Type[MemoryBankListResponse], DataWrapper[MemoryBankListResponse]),
         )

     def register(
@@ -337,9 +342,13 @@ async def list(
         return await self._get(
             "/v1/memory-banks",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[MemoryBankListResponse]._unwrapper,
             ),
-            cast_to=MemoryBankListResponse,
+            cast_to=cast(Type[MemoryBankListResponse], DataWrapper[MemoryBankListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/models.py b/src/llama_stack_client/resources/models.py
index e75525e6..1e93f7d5 100644
--- a/src/llama_stack_client/resources/models.py
+++ b/src/llama_stack_client/resources/models.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Dict, Union, Iterable, Optional
+from typing import Dict, Type, Union, Iterable, Optional, cast
 from typing_extensions import Literal

 import httpx
@@ -22,6 +22,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from ..types.model import Model
 from .._base_client import make_request_options
 from ..types.model_list_response import ModelListResponse
@@ -125,9 +126,13 @@ def list(
         return self._get(
             "/v1/models",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ModelListResponse]._unwrapper,
             ),
-            cast_to=ModelListResponse,
+            cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
         )

     def register(
@@ -324,9 +329,13 @@ async def list(
         return await self._get(
             "/v1/models",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ModelListResponse]._unwrapper,
             ),
-            cast_to=ModelListResponse,
+            cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/post_training/job.py b/src/llama_stack_client/resources/post_training/job.py
index fe80a69e..bd98c268 100644
--- a/src/llama_stack_client/resources/post_training/job.py
+++ b/src/llama_stack_client/resources/post_training/job.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Optional
+from typing import Type, Optional, cast

 import httpx

@@ -20,6 +20,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from ..._wrappers import DataWrapper
 from ..._base_client import make_request_options
 from ...types.post_training import job_cancel_params, job_status_params, job_artifacts_params
 from ...types.post_training.job_list_response import JobListResponse
@@ -83,9 +84,13 @@ def list(
         return self._get(
             "/v1/post-training/jobs",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[JobListResponse]._unwrapper,
             ),
-            cast_to=JobListResponse,
+            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
         )

     def artifacts(
@@ -273,9 +278,13 @@ async def list(
         return await self._get(
             "/v1/post-training/jobs",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[JobListResponse]._unwrapper,
             ),
-            cast_to=JobListResponse,
+            cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
         )

     async def artifacts(
diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py
index 2b85dcde..200b75ae 100644
--- a/src/llama_stack_client/resources/providers.py
+++ b/src/llama_stack_client/resources/providers.py
@@ -2,6 +2,8 @@

 from __future__ import annotations

+from typing import Type, cast
+
 import httpx

 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
@@ -14,6 +16,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.provider_list_response import ProviderListResponse

@@ -74,9 +77,13 @@ def list(
         return self._get(
             "/v1/providers/list",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ProviderListResponse]._unwrapper,
             ),
-            cast_to=ProviderListResponse,
+            cast_to=cast(Type[ProviderListResponse], DataWrapper[ProviderListResponse]),
         )

@@ -134,9 +141,13 @@ async def list(
         return await self._get(
             "/v1/providers/list",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ProviderListResponse]._unwrapper,
             ),
-            cast_to=ProviderListResponse,
+            cast_to=cast(Type[ProviderListResponse], DataWrapper[ProviderListResponse]),
         )
diff --git a/src/llama_stack_client/resources/scoring_functions.py b/src/llama_stack_client/resources/scoring_functions.py
index 07f6f55f..115a9156 100644
--- a/src/llama_stack_client/resources/scoring_functions.py
+++ b/src/llama_stack_client/resources/scoring_functions.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Optional
+from typing import Type, Optional, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.scoring_fn import ScoringFn
 from ..types.shared_params.return_type import ReturnType
@@ -125,9 +126,13 @@ def list(
         return self._get(
             "/v1/scoring-functions",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ScoringFunctionListResponse]._unwrapper,
             ),
-            cast_to=ScoringFunctionListResponse,
+            cast_to=cast(Type[ScoringFunctionListResponse], DataWrapper[ScoringFunctionListResponse]),
         )

     def register(
@@ -284,9 +289,13 @@ async def list(
         return await self._get(
             "/v1/scoring-functions",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ScoringFunctionListResponse]._unwrapper,
             ),
-            cast_to=ScoringFunctionListResponse,
+            cast_to=cast(Type[ScoringFunctionListResponse], DataWrapper[ScoringFunctionListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/shields.py b/src/llama_stack_client/resources/shields.py
index 3c14a6f7..e8327473 100644
--- a/src/llama_stack_client/resources/shields.py
+++ b/src/llama_stack_client/resources/shields.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Dict, Union, Iterable, Optional
+from typing import Dict, Type, Union, Iterable, Optional, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.shield import Shield
 from ..types.shield_list_response import ShieldListResponse
@@ -124,9 +125,13 @@ def list(
         return self._get(
             "/v1/shields",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ShieldListResponse]._unwrapper,
             ),
-            cast_to=ShieldListResponse,
+            cast_to=cast(Type[ShieldListResponse], DataWrapper[ShieldListResponse]),
         )

     def register(
@@ -278,9 +283,13 @@ async def list(
         return await self._get(
             "/v1/shields",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ShieldListResponse]._unwrapper,
             ),
-            cast_to=ShieldListResponse,
+            cast_to=cast(Type[ShieldListResponse], DataWrapper[ShieldListResponse]),
         )

     async def register(
diff --git a/src/llama_stack_client/resources/toolgroups.py b/src/llama_stack_client/resources/toolgroups.py
index 0adb5e7f..5d65f9ef 100644
--- a/src/llama_stack_client/resources/toolgroups.py
+++ b/src/llama_stack_client/resources/toolgroups.py
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Dict, Union, Iterable
+from typing import Dict, Type, Union, Iterable, cast

 import httpx

@@ -21,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.tool_group import ToolGroup
 from ..types.shared_params.url import URL
@@ -85,9 +86,13 @@ def list(
         return self._get(
             "/v1/toolgroups",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ToolgroupListResponse]._unwrapper,
             ),
-            cast_to=ToolgroupListResponse,
+            cast_to=cast(Type[ToolgroupListResponse], DataWrapper[ToolgroupListResponse]),
         )

     def get(
@@ -289,9 +294,13 @@ async def list(
         return await self._get(
             "/v1/toolgroups",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=DataWrapper[ToolgroupListResponse]._unwrapper,
             ),
-            cast_to=ToolgroupListResponse,
+            cast_to=cast(Type[ToolgroupListResponse], DataWrapper[ToolgroupListResponse]),
         )

     async def get(
diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py
index 6ca15896..b91c00c2 100644
--- a/src/llama_stack_client/resources/tools.py
+++ b/src/llama_stack_client/resources/tools.py
@@ -2,6 +2,8 @@

 from __future__ import annotations

+from typing import Type, cast
+
 import httpx

 from ..types import tool_list_params
@@ -19,6 +21,7 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._wrappers import DataWrapper
 from ..types.tool import Tool
 from .._base_client import make_request_options
 from ..types.tool_list_response import ToolListResponse
@@ -88,8 +91,9 @@ def list(
                 extra_body=extra_body,
                 timeout=timeout,
                 query=maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams),
+                post_parser=DataWrapper[ToolListResponse]._unwrapper,
             ),
-            cast_to=ToolListResponse,
+            cast_to=cast(Type[ToolListResponse], DataWrapper[ToolListResponse]),
         )

     def get(
@@ -197,8 +201,9 @@ async def list(
                 extra_body=extra_body,
                 timeout=timeout,
                 query=await async_maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams),
+                post_parser=DataWrapper[ToolListResponse]._unwrapper,
             ),
-            cast_to=ToolListResponse,
+            cast_to=cast(Type[ToolListResponse], DataWrapper[ToolListResponse]),
         )

     async def get(
diff --git a/src/llama_stack_client/types/dataset_list_response.py b/src/llama_stack_client/types/dataset_list_response.py
index a0d59234..0051669b 100644
--- a/src/llama_stack_client/types/dataset_list_response.py
+++ b/src/llama_stack_client/types/dataset_list_response.py
@@ -1,16 +1,16 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import Dict, List, Union
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

 from .._models import BaseModel
 from .shared.url import URL
 from .shared.param_type import ParamType

-__all__ = ["DatasetListResponse", "Data"]
+__all__ = ["DatasetListResponse", "DatasetListResponseItem"]


-class Data(BaseModel):
+class DatasetListResponseItem(BaseModel):
     dataset_schema: Dict[str, ParamType]

     identifier: str
@@ -26,5 +26,4 @@ class DatasetListResponseItem(BaseModel):
     url: URL


-class DatasetListResponse(BaseModel):
-    data: List[Data]
+DatasetListResponse: TypeAlias = List[DatasetListResponseItem]
diff --git a/src/llama_stack_client/types/eval_task_list_response.py b/src/llama_stack_client/types/eval_task_list_response.py
index 8f6db11d..11646563 100644
--- a/src/llama_stack_client/types/eval_task_list_response.py
+++ b/src/llama_stack_client/types/eval_task_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

-from .._models import BaseModel
 from .eval_task import EvalTask

 __all__ = ["EvalTaskListResponse"]

-
-class EvalTaskListResponse(BaseModel):
-    data: List[EvalTask]
+EvalTaskListResponse: TypeAlias = List[EvalTask]
diff --git a/src/llama_stack_client/types/memory_bank_list_response.py b/src/llama_stack_client/types/memory_bank_list_response.py
index 9ed4aa25..27d646f5 100644
--- a/src/llama_stack_client/types/memory_bank_list_response.py
+++ b/src/llama_stack_client/types/memory_bank_list_response.py
@@ -7,15 +7,15 @@

 __all__ = [
     "MemoryBankListResponse",
-    "Data",
-    "DataVectorMemoryBank",
-    "DataKeyValueMemoryBank",
-    "DataKeywordMemoryBank",
-    "DataGraphMemoryBank",
+    "MemoryBankListResponseItem",
+    "MemoryBankListResponseItemVectorMemoryBank",
+    "MemoryBankListResponseItemKeyValueMemoryBank",
+    "MemoryBankListResponseItemKeywordMemoryBank",
+    "MemoryBankListResponseItemGraphMemoryBank",
 ]


-class DataVectorMemoryBank(BaseModel):
+class MemoryBankListResponseItemVectorMemoryBank(BaseModel):
     chunk_size_in_tokens: int

     embedding_model: str
@@ -35,7 +35,7 @@ class DataVectorMemoryBank(BaseModel):
     overlap_size_in_tokens: Optional[int] = None


-class DataKeyValueMemoryBank(BaseModel):
+class MemoryBankListResponseItemKeyValueMemoryBank(BaseModel):
     identifier: str

     memory_bank_type: Literal["keyvalue"]
@@ -47,7 +47,7 @@ class DataKeyValueMemoryBank(BaseModel):
     type: Literal["memory_bank"]


-class DataKeywordMemoryBank(BaseModel):
+class MemoryBankListResponseItemKeywordMemoryBank(BaseModel):
     identifier: str

     memory_bank_type: Literal["keyword"]
@@ -59,7 +59,7 @@ class DataKeywordMemoryBank(BaseModel):
     type: Literal["memory_bank"]


-class DataGraphMemoryBank(BaseModel):
+class MemoryBankListResponseItemGraphMemoryBank(BaseModel):
     identifier: str

     memory_bank_type: Literal["graph"]
@@ -71,8 +71,11 @@ class DataGraphMemoryBank(BaseModel):
     type: Literal["memory_bank"]


-Data: TypeAlias = Union[DataVectorMemoryBank, DataKeyValueMemoryBank, DataKeywordMemoryBank, DataGraphMemoryBank]
-
+MemoryBankListResponseItem: TypeAlias = Union[
+    MemoryBankListResponseItemVectorMemoryBank,
+    MemoryBankListResponseItemKeyValueMemoryBank,
+    MemoryBankListResponseItemKeywordMemoryBank,
+    MemoryBankListResponseItemGraphMemoryBank,
+]

-class MemoryBankListResponse(BaseModel):
-    data: List[Data]
+MemoryBankListResponse: TypeAlias = List[MemoryBankListResponseItem]
diff --git a/src/llama_stack_client/types/model_list_response.py b/src/llama_stack_client/types/model_list_response.py
index 0119042c..905cdb0f 100644
--- a/src/llama_stack_client/types/model_list_response.py
+++ b/src/llama_stack_client/types/model_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

 from .model import Model
-from .._models import BaseModel

 __all__ = ["ModelListResponse"]

-
-class ModelListResponse(BaseModel):
-    data: List[Model]
+ModelListResponse: TypeAlias = List[Model]
diff --git a/src/llama_stack_client/types/post_training/job_list_response.py b/src/llama_stack_client/types/post_training/job_list_response.py
index 7700ec9f..cb42da2d 100644
--- a/src/llama_stack_client/types/post_training/job_list_response.py
+++ b/src/llama_stack_client/types/post_training/job_list_response.py
@@ -1,15 +1,15 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

 from ..._models import BaseModel

-__all__ = ["JobListResponse", "Data"]
+__all__ = ["JobListResponse", "JobListResponseItem"]


-class Data(BaseModel):
+class JobListResponseItem(BaseModel):
     job_uuid: str


-class JobListResponse(BaseModel):
-    data: List[Data]
+JobListResponse: TypeAlias = List[JobListResponseItem]
diff --git a/src/llama_stack_client/types/provider_list_response.py b/src/llama_stack_client/types/provider_list_response.py
index e02b24a2..cdbc96f7 100644
--- a/src/llama_stack_client/types/provider_list_response.py
+++ b/src/llama_stack_client/types/provider_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

-from .._models import BaseModel
 from .provider_info import ProviderInfo

 __all__ = ["ProviderListResponse"]

-
-class ProviderListResponse(BaseModel):
-    data: List[ProviderInfo]
+ProviderListResponse: TypeAlias = List[ProviderInfo]
diff --git a/src/llama_stack_client/types/scoring_function_list_response.py b/src/llama_stack_client/types/scoring_function_list_response.py
index 470f5006..bad85a54 100644
--- a/src/llama_stack_client/types/scoring_function_list_response.py
+++ b/src/llama_stack_client/types/scoring_function_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

-from .._models import BaseModel
 from .scoring_fn import ScoringFn

 __all__ = ["ScoringFunctionListResponse"]

-
-class ScoringFunctionListResponse(BaseModel):
-    data: List[ScoringFn]
+ScoringFunctionListResponse: TypeAlias = List[ScoringFn]
diff --git a/src/llama_stack_client/types/shield_list_response.py b/src/llama_stack_client/types/shield_list_response.py
index 0dd45e40..0cba0500 100644
--- a/src/llama_stack_client/types/shield_list_response.py
+++ b/src/llama_stack_client/types/shield_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

 from .shield import Shield
-from .._models import BaseModel

 __all__ = ["ShieldListResponse"]

-
-class ShieldListResponse(BaseModel):
-    data: List[Shield]
+ShieldListResponse: TypeAlias = List[Shield]
diff --git a/src/llama_stack_client/types/tool_list_response.py b/src/llama_stack_client/types/tool_list_response.py
index 0aa622db..11750ace 100644
--- a/src/llama_stack_client/types/tool_list_response.py
+++ b/src/llama_stack_client/types/tool_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

 from .tool import Tool
-from .._models import BaseModel

 __all__ = ["ToolListResponse"]

-
-class ToolListResponse(BaseModel):
-    data: List[Tool]
+ToolListResponse: TypeAlias = List[Tool]
diff --git a/src/llama_stack_client/types/toolgroup_list_response.py b/src/llama_stack_client/types/toolgroup_list_response.py
index 8d297d80..0f668de3 100644
--- a/src/llama_stack_client/types/toolgroup_list_response.py
+++ b/src/llama_stack_client/types/toolgroup_list_response.py
@@ -1,12 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 from typing import List
+from typing_extensions import TypeAlias

-from .._models import BaseModel
 from .tool_group import ToolGroup

 __all__ = ["ToolgroupListResponse"]

-
-class ToolgroupListResponse(BaseModel):
-    data: List[ToolGroup]
+ToolgroupListResponse: TypeAlias = List[ToolGroup]
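
Note on the mechanics: every `list` endpoint in this patch now points `cast_to` at `DataWrapper[<ListResponse>]` so the server's `{"data": [...]}` envelope still validates, and `post_parser=DataWrapper[...]._unwrapper` strips the envelope after validation, which is what lets the public response types shrink to bare `TypeAlias = List[...]` aliases. The `cast(Type[X], DataWrapper[X])` is a typing device only: the value handed to the transport at runtime really is the wrapper class, while the method signature keeps advertising `X`. A minimal sketch of that flow, using plain Pydantic in place of the SDK's internal `GenericModel` and a hypothetical `process_response` helper standing in for the real logic in `_base_client.py`:

```python
from typing import Callable, Generic, List, TypeVar

from pydantic import BaseModel

_T = TypeVar("_T")


class DataWrapper(BaseModel, Generic[_T]):
    # Mirrors _wrappers.py: validates a {"data": ...} envelope.
    data: _T

    @staticmethod
    def _unwrapper(obj: "DataWrapper[_T]") -> _T:
        return obj.data


class Model(BaseModel):
    identifier: str


def process_response(raw: dict, cast_to: type, post_parser: Callable):
    # Hypothetical stand-in for the client's response handling:
    # validate against cast_to, then let post_parser reshape the result.
    parsed = cast_to.model_validate(raw)
    return post_parser(parsed)


raw = {"data": [{"identifier": "llama-3-8b"}, {"identifier": "llama-guard"}]}
models = process_response(raw, DataWrapper[List[Model]], DataWrapper._unwrapper)
print([m.identifier for m in models])  # ['llama-3-8b', 'llama-guard']
```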
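For SDK callers, the visible change is that list endpoints return their items directly instead of a response model with a `.data` attribute. A before/after sketch, assuming a Llama Stack server reachable at a placeholder local URL:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

# Before this change, list() returned a wrapper model:
#     models = client.models.list().data

# After it, list() returns List[Model] directly:
models = client.models.list()
for model in models:
    print(model.identifier)
```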