From fb0e25a25ac480b1796fd984232f86c593da5a79 Mon Sep 17 00:00:00 2001
From: Dinesh Yeduguru
Date: Wed, 15 Jan 2025 13:51:57 -0800
Subject: [PATCH] Sync updates from stainless branch: dineshyv/dev

Regenerate the client against the RESTful v1 routes:

- RPC-style endpoints (POST /v1/agents/delete, GET /v1/datasets/get,
  GET /v1/eval/job/status, ...) become resource routes: IDs move out of
  request bodies and query strings into path segments (DELETE
  /v1/agents/{agent_id}, GET /v1/eval/jobs/{job_id}, ...).
- The corresponding resource methods take those IDs as positional
  arguments and reject empty values client-side with a ValueError.
- List endpoints drop the application/jsonl Accept header and gain
  typed responses (new EvalTaskListResponse, ToolListResponse,
  ToolgroupListResponse, and post_training JobListResponse models).
- Generated modules are reflowed to a narrower line width with
  regrouped imports; the needless f-string on the literal default
  base_url becomes a plain string.

---
 src/llama_stack_client/_client.py             | 306 ++++++++++++------
 .../resources/agents/agents.py                |  24 +-
 .../resources/agents/session.py               | 100 +++---
 .../resources/agents/steps.py                 |  59 ++--
 .../resources/agents/turn.py                  |  84 +++--
 src/llama_stack_client/resources/datasetio.py |   8 +-
 src/llama_stack_client/resources/datasets.py  |  58 ++--
 src/llama_stack_client/resources/eval/eval.py |   4 +-
 src/llama_stack_client/resources/eval/jobs.py |  60 ++--
 .../resources/eval_tasks.py                   |  45 ++-
 .../resources/memory_banks.py                 |  92 ++----
 src/llama_stack_client/resources/models.py    |  65 ++--
 .../resources/post_training/job.py            |  12 +-
 .../resources/scoring_functions.py            |  49 ++-
 src/llama_stack_client/resources/shields.py   |  47 ++-
 src/llama_stack_client/resources/telemetry.py |  44 +--
 .../resources/tool_runtime.py                 |  26 +-
 .../resources/toolgroups.py                   |  69 ++--
 src/llama_stack_client/resources/tools.py     |  49 ++-
 src/llama_stack_client/types/__init__.py      |  19 +-
 .../types/agents/__init__.py                  |   3 -
 .../types/agents/session_create_params.py     |   2 -
 .../types/agents/session_retrieve_params.py   |   2 -
 .../types/agents/turn_create_params.py        |   2 -
 .../types/dataset_list_response.py            |   8 +-
 .../types/eval/job_retrieve_params.py         |   2 -
 .../types/eval/job_status_params.py           |   2 -
 .../types/eval_task_list_response.py          |  12 +
 .../types/memory_bank_list_response.py        |  25 +-
 .../types/model_list_response.py              |  19 +-
 .../types/post_training/__init__.py           |   1 +
 .../types/post_training/job_list_response.py  |  15 +
 .../types/provider_list_response.py           |   8 +-
 .../types/scoring_function_list_response.py   |  80 +----
 .../types/shared/sampling_params.py           |  39 ++-
 .../types/shared_params/sampling_params.py    |  38 ++-
 .../types/shield_list_response.py             |  15 +-
 .../types/telemetry_get_span_tree_params.py   |   4 +-
 .../types/tool_list_params.py                 |   2 +-
 .../types/tool_list_response.py               |  12 +
 .../types/tool_runtime_list_tools_params.py   |   4 +-
 .../types/toolgroup_list_response.py          |  12 +
 tests/api_resources/agents/test_session.py    | 104 +++++-
 tests/api_resources/agents/test_steps.py      |  84 ++++-
 tests/api_resources/agents/test_turn.py       | 214 ++++++++++--
 tests/api_resources/eval/test_jobs.py         |  32 ++
 tests/api_resources/post_training/test_job.py |  42 +--
 tests/api_resources/test_agents.py            |  24 +-
 tests/api_resources/test_batch_inference.py   |  20 +-
 tests/api_resources/test_datasets.py          |  57 ++--
 tests/api_resources/test_eval.py              |  44 +--
 tests/api_resources/test_eval_tasks.py        |  72 ++---
 tests/api_resources/test_inference.py         |  40 +--
 tests/api_resources/test_memory_banks.py      |  52 +--
 tests/api_resources/test_models.py            |  86 ++---
 tests/api_resources/test_scoring_functions.py |  58 ++--
 tests/api_resources/test_shields.py           |  56 ++--
 tests/api_resources/test_telemetry.py         |   4 +-
 tests/api_resources/test_tool_runtime.py      |   4 +-
 tests/api_resources/test_toolgroups.py        |  88 ++---
 tests/api_resources/test_tools.py             |  60 ++--
 61 files changed, 1449 insertions(+), 1219 deletions(-)
 create mode 100644 src/llama_stack_client/types/eval_task_list_response.py
 create mode 100644 src/llama_stack_client/types/post_training/job_list_response.py
 create mode 100644 src/llama_stack_client/types/tool_list_response.py
 create mode 100644 src/llama_stack_client/types/toolgroup_list_response.py

diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py index 429826ad..76166b33 100644 ---
a/src/llama_stack_client/_client.py +++ b/src/llama_stack_client/_client.py @@ -1,60 +1,62 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from __future__ import annotations import json import os -from typing import Any, Union, Mapping -from typing_extensions import Self, override +from typing import Any, Mapping, Union import httpx +from typing_extensions import Self, override from . import _exceptions +from ._base_client import ( + DEFAULT_MAX_RETRIES, + AsyncAPIClient, + SyncAPIClient, +) +from ._exceptions import APIStatusError from ._qs import Querystring +from ._streaming import AsyncStream as AsyncStream +from ._streaming import Stream as Stream from ._types import ( NOT_GIVEN, - Omit, - Timeout, NotGiven, - Transport, + Omit, ProxiesTypes, RequestOptions, + Timeout, + Transport, ) from ._utils import ( - is_given, get_async_library, + is_given, ) from ._version import __version__ from .resources import ( - tools, + batch_inference, + datasetio, + datasets, + eval_tasks, + inference, + inspect, memory, + memory_banks, models, + providers, routes, safety, - inspect, scoring, + scoring_functions, shields, - datasets, - datasetio, - inference, - providers, + synthetic_data_generation, telemetry, - eval_tasks, - toolgroups, - memory_banks, tool_runtime, - batch_inference, - scoring_functions, - synthetic_data_generation, -) -from ._streaming import Stream as Stream, AsyncStream as AsyncStream -from ._exceptions import APIStatusError -from ._base_client import ( - DEFAULT_MAX_RETRIES, - SyncAPIClient, - AsyncAPIClient, + toolgroups, + tools, ) -from .resources.eval import eval from .resources.agents import agents +from .resources.eval import eval from .resources.post_training import post_training __all__ = [ @@ -125,7 +127,7 @@ def __init__( if base_url is None: base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: - base_url = f"http://any-hosted-llama-stack.com" + base_url = "http://any-hosted-llama-stack.com" if provider_data is not None: if default_headers is None: @@ -160,7 +162,9 @@ def __init__( self.routes = routes.RoutesResource(self) self.safety = safety.SafetyResource(self) self.shields = shields.ShieldsResource(self) - self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResource(self) + self.synthetic_data_generation = ( + synthetic_data_generation.SyntheticDataGenerationResource(self) + ) self.telemetry = telemetry.TelemetryResource(self) self.datasetio = datasetio.DatasetioResource(self) self.scoring = scoring.ScoringResource(self) @@ -200,10 +204,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -244,10 +252,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -256,13 +268,17 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) @@ -284,7 +300,9 @@ class AsyncLlamaStackClient(AsyncAPIClient): routes: routes.AsyncRoutesResource safety: safety.AsyncSafetyResource shields: shields.AsyncShieldsResource - synthetic_data_generation: synthetic_data_generation.AsyncSyntheticDataGenerationResource + synthetic_data_generation: ( + synthetic_data_generation.AsyncSyntheticDataGenerationResource + ) telemetry: telemetry.AsyncTelemetryResource datasetio: datasetio.AsyncDatasetioResource scoring: scoring.AsyncScoringResource @@ -322,7 +340,7 @@ def __init__( if base_url is None: base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: - base_url = f"http://any-hosted-llama-stack.com" + base_url = "http://any-hosted-llama-stack.com" if provider_data is not None: if default_headers is None: @@ -357,7 +375,9 @@ def __init__( self.routes = routes.AsyncRoutesResource(self) self.safety = safety.AsyncSafetyResource(self) self.shields = shields.AsyncShieldsResource(self) - self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResource(self) + self.synthetic_data_generation = ( + synthetic_data_generation.AsyncSyntheticDataGenerationResource(self) + ) self.telemetry = telemetry.AsyncTelemetryResource(self) self.datasetio = datasetio.AsyncDatasetioResource(self) self.scoring = scoring.AsyncScoringResource(self) @@ -397,10 +417,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -441,10 +465,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -453,138 +481,232 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) class LlamaStackClientWithRawResponse: def __init__(self, client: LlamaStackClient) -> None: - self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse(client.toolgroups) + self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse( + client.toolgroups + ) self.tools = tools.ToolsResourceWithRawResponse(client.tools) - self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse( + client.tool_runtime + ) self.agents = agents.AgentsResourceWithRawResponse(client.agents) - self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse(client.batch_inference) + self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse( + client.batch_inference + ) self.datasets = datasets.DatasetsResourceWithRawResponse(client.datasets) self.eval = eval.EvalResourceWithRawResponse(client.eval) self.inspect = inspect.InspectResourceWithRawResponse(client.inspect) self.inference = inference.InferenceResourceWithRawResponse(client.inference) self.memory = memory.MemoryResourceWithRawResponse(client.memory) - self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse(client.memory_banks) + self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse( + client.memory_banks + ) self.models = models.ModelsResourceWithRawResponse(client.models) - self.post_training = post_training.PostTrainingResourceWithRawResponse(client.post_training) + self.post_training = post_training.PostTrainingResourceWithRawResponse( + client.post_training + ) self.providers = 
providers.ProvidersResourceWithRawResponse(client.providers) self.routes = routes.RoutesResourceWithRawResponse(client.routes) self.safety = safety.SafetyResourceWithRawResponse(client.safety) self.shields = shields.ShieldsResourceWithRawResponse(client.shields) - self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse( - client.synthetic_data_generation + self.synthetic_data_generation = ( + synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse( + client.synthetic_data_generation + ) ) self.telemetry = telemetry.TelemetryResourceWithRawResponse(client.telemetry) self.datasetio = datasetio.DatasetioResourceWithRawResponse(client.datasetio) self.scoring = scoring.ScoringResourceWithRawResponse(client.scoring) - self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithRawResponse(client.scoring_functions) + self.scoring_functions = ( + scoring_functions.ScoringFunctionsResourceWithRawResponse( + client.scoring_functions + ) + ) self.eval_tasks = eval_tasks.EvalTasksResourceWithRawResponse(client.eval_tasks) class AsyncLlamaStackClientWithRawResponse: def __init__(self, client: AsyncLlamaStackClient) -> None: - self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse(client.toolgroups) + self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse( + client.toolgroups + ) self.tools = tools.AsyncToolsResourceWithRawResponse(client.tools) - self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse( + client.tool_runtime + ) self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents) - self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithRawResponse(client.batch_inference) + self.batch_inference = ( + batch_inference.AsyncBatchInferenceResourceWithRawResponse( + client.batch_inference + ) + ) self.datasets = datasets.AsyncDatasetsResourceWithRawResponse(client.datasets) self.eval = eval.AsyncEvalResourceWithRawResponse(client.eval) self.inspect = inspect.AsyncInspectResourceWithRawResponse(client.inspect) - self.inference = inference.AsyncInferenceResourceWithRawResponse(client.inference) + self.inference = inference.AsyncInferenceResourceWithRawResponse( + client.inference + ) self.memory = memory.AsyncMemoryResourceWithRawResponse(client.memory) - self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse(client.memory_banks) + self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse( + client.memory_banks + ) self.models = models.AsyncModelsResourceWithRawResponse(client.models) - self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(client.post_training) - self.providers = providers.AsyncProvidersResourceWithRawResponse(client.providers) + self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse( + client.post_training + ) + self.providers = providers.AsyncProvidersResourceWithRawResponse( + client.providers + ) self.routes = routes.AsyncRoutesResourceWithRawResponse(client.routes) self.safety = safety.AsyncSafetyResourceWithRawResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithRawResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithRawResponse( client.synthetic_data_generation ) - self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse(client.telemetry) - self.datasetio = 
datasetio.AsyncDatasetioResourceWithRawResponse(client.datasetio) + self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse( + client.telemetry + ) + self.datasetio = datasetio.AsyncDatasetioResourceWithRawResponse( + client.datasetio + ) self.scoring = scoring.AsyncScoringResourceWithRawResponse(client.scoring) - self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithRawResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.AsyncScoringFunctionsResourceWithRawResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse(client.eval_tasks) class LlamaStackClientWithStreamedResponse: def __init__(self, client: LlamaStackClient) -> None: - self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse(client.toolgroups) + self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse( + client.toolgroups + ) self.tools = tools.ToolsResourceWithStreamingResponse(client.tools) - self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse( + client.tool_runtime + ) self.agents = agents.AgentsResourceWithStreamingResponse(client.agents) - self.batch_inference = batch_inference.BatchInferenceResourceWithStreamingResponse(client.batch_inference) + self.batch_inference = ( + batch_inference.BatchInferenceResourceWithStreamingResponse( + client.batch_inference + ) + ) self.datasets = datasets.DatasetsResourceWithStreamingResponse(client.datasets) self.eval = eval.EvalResourceWithStreamingResponse(client.eval) self.inspect = inspect.InspectResourceWithStreamingResponse(client.inspect) - self.inference = inference.InferenceResourceWithStreamingResponse(client.inference) + self.inference = inference.InferenceResourceWithStreamingResponse( + client.inference + ) self.memory = memory.MemoryResourceWithStreamingResponse(client.memory) - self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse(client.memory_banks) + self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse( + client.memory_banks + ) self.models = models.ModelsResourceWithStreamingResponse(client.models) - self.post_training = post_training.PostTrainingResourceWithStreamingResponse(client.post_training) - self.providers = providers.ProvidersResourceWithStreamingResponse(client.providers) + self.post_training = post_training.PostTrainingResourceWithStreamingResponse( + client.post_training + ) + self.providers = providers.ProvidersResourceWithStreamingResponse( + client.providers + ) self.routes = routes.RoutesResourceWithStreamingResponse(client.routes) self.safety = safety.SafetyResourceWithStreamingResponse(client.safety) self.shields = shields.ShieldsResourceWithStreamingResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithStreamingResponse( client.synthetic_data_generation ) - self.telemetry = telemetry.TelemetryResourceWithStreamingResponse(client.telemetry) - self.datasetio = datasetio.DatasetioResourceWithStreamingResponse(client.datasetio) + self.telemetry = telemetry.TelemetryResourceWithStreamingResponse( + client.telemetry + ) + self.datasetio = datasetio.DatasetioResourceWithStreamingResponse( + client.datasetio + ) self.scoring = scoring.ScoringResourceWithStreamingResponse(client.scoring) - 
self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithStreamingResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.ScoringFunctionsResourceWithStreamingResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse(client.eval_tasks) class AsyncLlamaStackClientWithStreamedResponse: def __init__(self, client: AsyncLlamaStackClient) -> None: - self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse(client.toolgroups) + self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse( + client.toolgroups + ) self.tools = tools.AsyncToolsResourceWithStreamingResponse(client.tools) - self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse( + client.tool_runtime + ) self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents) - self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithStreamingResponse(client.batch_inference) - self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(client.datasets) + self.batch_inference = ( + batch_inference.AsyncBatchInferenceResourceWithStreamingResponse( + client.batch_inference + ) + ) + self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse( + client.datasets + ) self.eval = eval.AsyncEvalResourceWithStreamingResponse(client.eval) self.inspect = inspect.AsyncInspectResourceWithStreamingResponse(client.inspect) - self.inference = inference.AsyncInferenceResourceWithStreamingResponse(client.inference) + self.inference = inference.AsyncInferenceResourceWithStreamingResponse( + client.inference + ) self.memory = memory.AsyncMemoryResourceWithStreamingResponse(client.memory) - self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse(client.memory_banks) + self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse( + client.memory_banks + ) self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) - self.post_training = post_training.AsyncPostTrainingResourceWithStreamingResponse(client.post_training) - self.providers = providers.AsyncProvidersResourceWithStreamingResponse(client.providers) + self.post_training = ( + post_training.AsyncPostTrainingResourceWithStreamingResponse( + client.post_training + ) + ) + self.providers = providers.AsyncProvidersResourceWithStreamingResponse( + client.providers + ) self.routes = routes.AsyncRoutesResourceWithStreamingResponse(client.routes) self.safety = safety.AsyncSafetyResourceWithStreamingResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithStreamingResponse(client.shields) - self.synthetic_data_generation = ( - synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse( - client.synthetic_data_generation - ) + self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse( + client.synthetic_data_generation + ) + self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse( + client.telemetry + ) + self.datasetio = datasetio.AsyncDatasetioResourceWithStreamingResponse( + client.datasetio ) - self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse(client.telemetry) - self.datasetio = 
datasetio.AsyncDatasetioResourceWithStreamingResponse(client.datasetio) self.scoring = scoring.AsyncScoringResourceWithStreamingResponse(client.scoring) - self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse(client.eval_tasks) Client = LlamaStackClient diff --git a/src/llama_stack_client/resources/agents/agents.py b/src/llama_stack_client/resources/agents/agents.py index 32dea896..23b8b5cc 100644 --- a/src/llama_stack_client/resources/agents/agents.py +++ b/src/llama_stack_client/resources/agents/agents.py @@ -20,7 +20,7 @@ StepsResourceWithStreamingResponse, AsyncStepsResourceWithStreamingResponse, ) -from ...types import agent_create_params, agent_delete_params +from ...types import agent_create_params from .session import ( SessionResource, AsyncSessionResource, @@ -115,7 +115,7 @@ def create( **(extra_headers or {}), } return self._post( - "/v1/agents/create", + "/v1/agents", body=maybe_transform({"agent_config": agent_config}, agent_create_params.AgentCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -125,8 +125,8 @@ def create( def delete( self, - *, agent_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -146,6 +146,8 @@ def delete( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -156,9 +158,8 @@ def delete( ), **(extra_headers or {}), } - return self._post( - "/v1/agents/delete", - body=maybe_transform({"agent_id": agent_id}, agent_delete_params.AgentDeleteParams), + return self._delete( + f"/v1/agents/{agent_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -231,7 +232,7 @@ async def create( **(extra_headers or {}), } return await self._post( - "/v1/agents/create", + "/v1/agents", body=await async_maybe_transform({"agent_config": agent_config}, agent_create_params.AgentCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -241,8 +242,8 @@ async def create( async def delete( self, - *, agent_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
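
Reviewer note on the agents hunks above: creation now posts to the bare collection path ("/v1/agents" rather than "/v1/agents/create"), and deletion becomes a real DELETE with agent_id as a positional path parameter that is validated to be non-empty before any request is sent. A minimal sketch of the new call shape; the base URL, agent config values, and IDs are placeholders, not part of this patch:

    from llama_stack_client import LlamaStackClient

    # LLAMA_STACK_CLIENT_BASE_URL is also honored, per _client.py above.
    client = LlamaStackClient(base_url="http://localhost:5001")  # placeholder URL

    created = client.agents.create(
        agent_config={"model": "my-model", "instructions": "be helpful"},  # placeholder config
    )  # POST /v1/agents
    client.agents.delete(created.agent_id)  # DELETE /v1/agents/{agent_id}
    # client.agents.delete("") now raises ValueError client-side instead of hitting the server.
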
@@ -262,6 +263,8 @@ async def delete( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -272,9 +275,8 @@ async def delete( ), **(extra_headers or {}), } - return await self._post( - "/v1/agents/delete", - body=await async_maybe_transform({"agent_id": agent_id}, agent_delete_params.AgentDeleteParams), + return await self._delete( + f"/v1/agents/{agent_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/agents/session.py b/src/llama_stack_client/resources/agents/session.py index 1ed12c90..8497c980 100644 --- a/src/llama_stack_client/resources/agents/session.py +++ b/src/llama_stack_client/resources/agents/session.py @@ -21,7 +21,7 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import session_create_params, session_delete_params, session_retrieve_params +from ...types.agents import session_create_params, session_retrieve_params from ...types.agents.session import Session from ...types.agents.session_create_response import SessionCreateResponse @@ -50,8 +50,8 @@ def with_streaming_response(self) -> SessionResourceWithStreamingResponse: def create( self, - *, agent_id: str, + *, session_name: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -72,6 +72,8 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") extra_headers = { **strip_not_given( { @@ -82,14 +84,8 @@ def create( **(extra_headers or {}), } return self._post( - "/v1/agents/session/create", - body=maybe_transform( - { - "agent_id": agent_id, - "session_name": session_name, - }, - session_create_params.SessionCreateParams, - ), + f"/v1/agents/{agent_id}/session", + body=maybe_transform({"session_name": session_name}, session_create_params.SessionCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -98,9 +94,9 @@ def create( def retrieve( self, + session_id: str, *, agent_id: str, - session_id: str, turn_ids: List[str] | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -121,6 +117,10 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = { **strip_not_given( { @@ -130,30 +130,23 @@ def retrieve( ), **(extra_headers or {}), } - return self._post( - "/v1/agents/session/get", - body=maybe_transform({"turn_ids": turn_ids}, session_retrieve_params.SessionRetrieveParams), + return self._get( + f"/v1/agents/{agent_id}/session/{session_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform( - { - "agent_id": agent_id, - "session_id": 
session_id, - }, - session_retrieve_params.SessionRetrieveParams, - ), + query=maybe_transform({"turn_ids": turn_ids}, session_retrieve_params.SessionRetrieveParams), ), cast_to=Session, ) def delete( self, + session_id: str, *, agent_id: str, - session_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -173,6 +166,10 @@ def delete( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -183,15 +180,8 @@ def delete( ), **(extra_headers or {}), } - return self._post( - "/v1/agents/session/delete", - body=maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - }, - session_delete_params.SessionDeleteParams, - ), + return self._delete( + f"/v1/agents/{agent_id}/session/{session_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -221,8 +211,8 @@ def with_streaming_response(self) -> AsyncSessionResourceWithStreamingResponse: async def create( self, - *, agent_id: str, + *, session_name: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -243,6 +233,8 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") extra_headers = { **strip_not_given( { @@ -253,14 +245,8 @@ async def create( **(extra_headers or {}), } return await self._post( - "/v1/agents/session/create", - body=await async_maybe_transform( - { - "agent_id": agent_id, - "session_name": session_name, - }, - session_create_params.SessionCreateParams, - ), + f"/v1/agents/{agent_id}/session", + body=await async_maybe_transform({"session_name": session_name}, session_create_params.SessionCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -269,9 +255,9 @@ async def create( async def retrieve( self, + session_id: str, *, agent_id: str, - session_id: str, turn_ids: List[str] | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -292,6 +278,10 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = { **strip_not_given( { @@ -301,20 +291,15 @@ async def retrieve( ), **(extra_headers or {}), } - return await self._post( - "/v1/agents/session/get", - body=await async_maybe_transform({"turn_ids": turn_ids}, session_retrieve_params.SessionRetrieveParams), + return await self._get( + f"/v1/agents/{agent_id}/session/{session_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, 
extra_body=extra_body, timeout=timeout, query=await async_maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - }, - session_retrieve_params.SessionRetrieveParams, + {"turn_ids": turn_ids}, session_retrieve_params.SessionRetrieveParams ), ), cast_to=Session, @@ -322,9 +307,9 @@ async def retrieve( async def delete( self, + session_id: str, *, agent_id: str, - session_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -344,6 +329,10 @@ async def delete( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -354,15 +343,8 @@ async def delete( ), **(extra_headers or {}), } - return await self._post( - "/v1/agents/session/delete", - body=await async_maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - }, - session_delete_params.SessionDeleteParams, - ), + return await self._delete( + f"/v1/agents/{agent_id}/session/{session_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/agents/steps.py b/src/llama_stack_client/resources/agents/steps.py index 06488eed..590c88fb 100644 --- a/src/llama_stack_client/resources/agents/steps.py +++ b/src/llama_stack_client/resources/agents/steps.py @@ -5,11 +5,7 @@ import httpx from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import ( - maybe_transform, - strip_not_given, - async_maybe_transform, -) +from ..._utils import strip_not_given from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -19,7 +15,6 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import step_retrieve_params from ...types.agents.step_retrieve_response import StepRetrieveResponse __all__ = ["StepsResource", "AsyncStepsResource"] @@ -47,10 +42,10 @@ def with_streaming_response(self) -> StepsResourceWithStreamingResponse: def retrieve( self, + step_id: str, *, agent_id: str, session_id: str, - step_id: str, turn_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -71,6 +66,14 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + if not turn_id: + raise ValueError(f"Expected a non-empty value for `turn_id` but received {turn_id!r}") + if not step_id: + raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") extra_headers = { **strip_not_given( { @@ -81,21 +84,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/agents/step/get", + f"/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}", options=make_request_options( - 
extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - "step_id": step_id, - "turn_id": turn_id, - }, - step_retrieve_params.StepRetrieveParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=StepRetrieveResponse, ) @@ -123,10 +114,10 @@ def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse: async def retrieve( self, + step_id: str, *, agent_id: str, session_id: str, - step_id: str, turn_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -147,6 +138,14 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + if not turn_id: + raise ValueError(f"Expected a non-empty value for `turn_id` but received {turn_id!r}") + if not step_id: + raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") extra_headers = { **strip_not_given( { @@ -157,21 +156,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/agents/step/get", + f"/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - "step_id": step_id, - "turn_id": turn_id, - }, - step_retrieve_params.StepRetrieveParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=StepRetrieveResponse, ) diff --git a/src/llama_stack_client/resources/agents/turn.py b/src/llama_stack_client/resources/agents/turn.py index 8e671e26..d86fca7b 100644 --- a/src/llama_stack_client/resources/agents/turn.py +++ b/src/llama_stack_client/resources/agents/turn.py @@ -24,7 +24,7 @@ ) from ..._streaming import Stream, AsyncStream from ..._base_client import make_request_options -from ...types.agents import turn_create_params, turn_retrieve_params +from ...types.agents import turn_create_params from ...types.agents.turn import Turn from ...types.agents.turn_create_response import TurnCreateResponse @@ -54,10 +54,10 @@ def with_streaming_response(self) -> TurnResourceWithStreamingResponse: @overload def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -85,10 +85,10 @@ def create( @overload def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, stream: Literal[True], documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -116,10 +116,10 @@ def create( @overload def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, stream: bool, documents: Iterable[turn_create_params.Document] | 
NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -144,13 +144,13 @@ def create( """ ... - @required_args(["agent_id", "messages", "session_id"], ["agent_id", "messages", "session_id", "stream"]) + @required_args(["agent_id", "messages"], ["agent_id", "messages", "stream"]) def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -163,6 +163,10 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> TurnCreateResponse | Stream[TurnCreateResponse]: + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -176,12 +180,10 @@ def create( return cast( TurnCreateResponse, self._post( - "/v1/agents/turn/create", + f"/v1/agents/{agent_id}/session/{session_id}/turn", body=maybe_transform( { - "agent_id": agent_id, "messages": messages, - "session_id": session_id, "documents": documents, "stream": stream, "toolgroups": toolgroups, @@ -201,10 +203,10 @@ def create( def retrieve( self, + turn_id: str, *, agent_id: str, session_id: str, - turn_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
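
The turn API above mirrors the session and step changes earlier in this patch: the ID nearest the target resource becomes the positional argument, the outer IDs stay keyword-only to fill their path segments, and required_args no longer lists session_id since positional arguments are enforced by Python itself. A sketch of both create overloads, assuming placeholder IDs and a minimal user message:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5001")  # placeholder URL

    # Non-streaming: POST /v1/agents/{agent_id}/session/{session_id}/turn
    turn = client.agents.turn.create(
        "session-456",                     # session_id, now a positional path segment
        agent_id="agent-123",              # keyword-only, fills {agent_id} in the path
        messages=[{"role": "user", "content": "hello"}],
    )

    # stream=True selects the Stream[TurnCreateResponse] overload (served as text/event-stream).
    for chunk in client.agents.turn.create(
        "session-456",
        agent_id="agent-123",
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
    ):
        print(chunk)
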
@@ -224,6 +226,12 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + if not turn_id: + raise ValueError(f"Expected a non-empty value for `turn_id` but received {turn_id!r}") extra_headers = { **strip_not_given( { @@ -234,20 +242,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/agents/turn/get", + f"/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - "turn_id": turn_id, - }, - turn_retrieve_params.TurnRetrieveParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Turn, ) @@ -276,10 +273,10 @@ def with_streaming_response(self) -> AsyncTurnResourceWithStreamingResponse: @overload async def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -307,10 +304,10 @@ async def create( @overload async def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, stream: Literal[True], documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -338,10 +335,10 @@ async def create( @overload async def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, stream: bool, documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -366,13 +363,13 @@ async def create( """ ... 
- @required_args(["agent_id", "messages", "session_id"], ["agent_id", "messages", "session_id", "stream"]) + @required_args(["agent_id", "messages"], ["agent_id", "messages", "stream"]) async def create( self, + session_id: str, *, agent_id: str, messages: Iterable[turn_create_params.Message], - session_id: str, documents: Iterable[turn_create_params.Document] | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, toolgroups: List[turn_create_params.Toolgroup] | NotGiven = NOT_GIVEN, @@ -385,6 +382,10 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> TurnCreateResponse | AsyncStream[TurnCreateResponse]: + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -398,12 +399,10 @@ async def create( return cast( TurnCreateResponse, await self._post( - "/v1/agents/turn/create", + f"/v1/agents/{agent_id}/session/{session_id}/turn", body=await async_maybe_transform( { - "agent_id": agent_id, "messages": messages, - "session_id": session_id, "documents": documents, "stream": stream, "toolgroups": toolgroups, @@ -423,10 +422,10 @@ async def create( async def retrieve( self, + turn_id: str, *, agent_id: str, session_id: str, - turn_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -446,6 +445,12 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not agent_id: + raise ValueError(f"Expected a non-empty value for `agent_id` but received {agent_id!r}") + if not session_id: + raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}") + if not turn_id: + raise ValueError(f"Expected a non-empty value for `turn_id` but received {turn_id!r}") extra_headers = { **strip_not_given( { @@ -456,20 +461,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/agents/turn/get", + f"/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "agent_id": agent_id, - "session_id": session_id, - "turn_id": turn_id, - }, - turn_retrieve_params.TurnRetrieveParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Turn, ) diff --git a/src/llama_stack_client/resources/datasetio.py b/src/llama_stack_client/resources/datasetio.py index 947c2bcb..c49a8f5b 100644 --- a/src/llama_stack_client/resources/datasetio.py +++ b/src/llama_stack_client/resources/datasetio.py @@ -82,7 +82,7 @@ def append_rows( **(extra_headers or {}), } return self._post( - "/v1/datasetio/append-rows", + "/v1/datasetio/rows", body=maybe_transform( { "dataset_id": dataset_id, @@ -132,7 +132,7 @@ def get_rows_paginated( **(extra_headers or {}), } return self._get( - "/v1/datasetio/get-rows-paginated", + "/v1/datasetio/rows", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -207,7 +207,7 @@ async def 
append_rows( **(extra_headers or {}), } return await self._post( - "/v1/datasetio/append-rows", + "/v1/datasetio/rows", body=await async_maybe_transform( { "dataset_id": dataset_id, @@ -257,7 +257,7 @@ async def get_rows_paginated( **(extra_headers or {}), } return await self._get( - "/v1/datasetio/get-rows-paginated", + "/v1/datasetio/rows", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/llama_stack_client/resources/datasets.py b/src/llama_stack_client/resources/datasets.py index 6cb7a4b3..4e9efe5d 100644 --- a/src/llama_stack_client/resources/datasets.py +++ b/src/llama_stack_client/resources/datasets.py @@ -6,7 +6,7 @@ import httpx -from ..types import dataset_register_params, dataset_retrieve_params, dataset_unregister_params +from ..types import dataset_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -52,8 +52,8 @@ def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse: def retrieve( self, - *, dataset_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -73,6 +73,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") extra_headers = { **strip_not_given( { @@ -83,13 +85,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/datasets/get", + f"/v1/datasets/{dataset_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"dataset_id": dataset_id}, dataset_retrieve_params.DatasetRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=DatasetRetrieveResponse, ) @@ -116,7 +114,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -127,7 +124,7 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/datasets/list", + "/v1/datasets", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -173,7 +170,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/datasets/register", + "/v1/datasets", body=maybe_transform( { "dataset_id": dataset_id, @@ -193,8 +190,8 @@ def register( def unregister( self, - *, dataset_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -214,6 +211,8 @@ def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -224,9 +223,8 @@ def unregister( ), **(extra_headers or {}), } - return self._post( - "/v1/datasets/unregister", - body=maybe_transform({"dataset_id": dataset_id}, dataset_unregister_params.DatasetUnregisterParams), + return self._delete( + f"/v1/datasets/{dataset_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -256,8 +254,8 @@ def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse: async def retrieve( self, - *, dataset_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -277,6 +275,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") extra_headers = { **strip_not_given( { @@ -287,15 +287,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/datasets/get", + f"/v1/datasets/{dataset_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"dataset_id": dataset_id}, dataset_retrieve_params.DatasetRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=DatasetRetrieveResponse, ) @@ -322,7 +316,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -333,7 +326,7 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/datasets/list", + "/v1/datasets", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -379,7 +372,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/datasets/register", + "/v1/datasets", body=await async_maybe_transform( { "dataset_id": dataset_id, @@ -399,8 +392,8 @@ async def register( async def unregister( self, - *, dataset_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
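
Taken together, the dataset surface is now a single REST collection, and the two datasetio endpoints collapse onto one path split by HTTP verb. A sketch with placeholder IDs; the row payload shape and the get_rows_paginated parameters are recalled from the surrounding SDK, not shown in these hunks:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5001")  # placeholder URL

    client.datasets.list()                    # GET /v1/datasets (plain JSON; the application/jsonl Accept header is gone)
    client.datasets.retrieve("my-dataset")    # GET /v1/datasets/my-dataset
    client.datasets.unregister("my-dataset")  # DELETE /v1/datasets/my-dataset (was POST /v1/datasets/unregister with a body)

    client.datasetio.append_rows(dataset_id="my-dataset", rows=[{"input": "q", "output": "a"}])  # POST /v1/datasetio/rows
    client.datasetio.get_rows_paginated(dataset_id="my-dataset", rows_in_page=10)                # GET /v1/datasetio/rows
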
@@ -420,6 +413,8 @@ async def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not dataset_id: + raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -430,11 +425,8 @@ async def unregister( ), **(extra_headers or {}), } - return await self._post( - "/v1/datasets/unregister", - body=await async_maybe_transform( - {"dataset_id": dataset_id}, dataset_unregister_params.DatasetUnregisterParams - ), + return await self._delete( + f"/v1/datasets/{dataset_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/eval/eval.py b/src/llama_stack_client/resources/eval/eval.py index 47673d8b..094ad09d 100644 --- a/src/llama_stack_client/resources/eval/eval.py +++ b/src/llama_stack_client/resources/eval/eval.py @@ -146,7 +146,7 @@ def run_eval( **(extra_headers or {}), } return self._post( - "/v1/eval/run-eval", + "/v1/eval/run", body=maybe_transform( { "task_config": task_config, @@ -271,7 +271,7 @@ async def run_eval( **(extra_headers or {}), } return await self._post( - "/v1/eval/run-eval", + "/v1/eval/run", body=await async_maybe_transform( { "task_config": task_config, diff --git a/src/llama_stack_client/resources/eval/jobs.py b/src/llama_stack_client/resources/eval/jobs.py index aada8a42..944ca1a9 100644 --- a/src/llama_stack_client/resources/eval/jobs.py +++ b/src/llama_stack_client/resources/eval/jobs.py @@ -50,8 +50,8 @@ def with_streaming_response(self) -> JobsResourceWithStreamingResponse: def retrieve( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -72,6 +72,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { **strip_not_given( { @@ -82,19 +84,13 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/eval/job/result", + f"/v1/eval/jobs/{job_id}/result", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_retrieve_params.JobRetrieveParams, - ), + query=maybe_transform({"task_id": task_id}, job_retrieve_params.JobRetrieveParams), ), cast_to=EvaluateResponse, ) @@ -134,7 +130,7 @@ def cancel( **(extra_headers or {}), } return self._post( - "/v1/eval/job/cancel", + "/v1/eval/jobs/cancel", body=maybe_transform( { "job_id": job_id, @@ -150,8 +146,8 @@ def cancel( def status( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -172,6 +168,8 @@ def status( timeout: Override the client-level default timeout for this request, in seconds """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { **strip_not_given( { @@ -182,19 +180,13 @@ def status( **(extra_headers or {}), } return self._get( - "/v1/eval/job/status", + f"/v1/eval/jobs/{job_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - 
query=maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_status_params.JobStatusParams, - ), + query=maybe_transform({"task_id": task_id}, job_status_params.JobStatusParams), ), cast_to=JobStatusResponse, ) @@ -222,8 +214,8 @@ def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: async def retrieve( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -244,6 +236,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { **strip_not_given( { @@ -254,19 +248,13 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/eval/job/result", + f"/v1/eval/jobs/{job_id}/result", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_retrieve_params.JobRetrieveParams, - ), + query=await async_maybe_transform({"task_id": task_id}, job_retrieve_params.JobRetrieveParams), ), cast_to=EvaluateResponse, ) @@ -306,7 +294,7 @@ async def cancel( **(extra_headers or {}), } return await self._post( - "/v1/eval/job/cancel", + "/v1/eval/jobs/cancel", body=await async_maybe_transform( { "job_id": job_id, @@ -322,8 +310,8 @@ async def cancel( async def status( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -344,6 +332,8 @@ async def status( timeout: Override the client-level default timeout for this request, in seconds """ + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { **strip_not_given( { @@ -354,19 +344,13 @@ async def status( **(extra_headers or {}), } return await self._get( - "/v1/eval/job/status", + f"/v1/eval/jobs/{job_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_status_params.JobStatusParams, - ), + query=await async_maybe_transform({"task_id": task_id}, job_status_params.JobStatusParams), ), cast_to=JobStatusResponse, ) diff --git a/src/llama_stack_client/resources/eval_tasks.py b/src/llama_stack_client/resources/eval_tasks.py index 9ee4f2cf..f869f804 100644 --- a/src/llama_stack_client/resources/eval_tasks.py +++ b/src/llama_stack_client/resources/eval_tasks.py @@ -6,7 +6,7 @@ import httpx -from ..types import eval_task_register_params, eval_task_retrieve_params +from ..types import eval_task_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -23,6 +23,7 @@ ) from .._base_client import make_request_options from ..types.eval_task import EvalTask +from ..types.eval_task_list_response import EvalTaskListResponse __all__ = ["EvalTasksResource", "AsyncEvalTasksResource"] @@ -49,8 +50,8 @@ def with_streaming_response(self) -> EvalTasksResourceWithStreamingResponse: def retrieve( self, + eval_task_id: str, *, - name: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if 
you need to pass additional parameters to the API that aren't available via kwargs. @@ -70,6 +71,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not eval_task_id: + raise ValueError(f"Expected a non-empty value for `eval_task_id` but received {eval_task_id!r}") extra_headers = { **strip_not_given( { @@ -80,13 +83,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/eval-tasks/get", + f"/v1/eval-tasks/{eval_task_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"name": name}, eval_task_retrieve_params.EvalTaskRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvalTask, ) @@ -102,7 +101,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvalTask: + ) -> EvalTaskListResponse: """ Args: extra_headers: Send extra headers @@ -113,7 +112,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -124,11 +122,11 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/eval-tasks/list", + "/v1/eval-tasks", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=EvalTask, + cast_to=EvalTaskListResponse, ) def register( @@ -170,7 +168,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/eval-tasks/register", + "/v1/eval-tasks", body=maybe_transform( { "dataset_id": dataset_id, @@ -211,8 +209,8 @@ def with_streaming_response(self) -> AsyncEvalTasksResourceWithStreamingResponse async def retrieve( self, + eval_task_id: str, *, - name: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
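The eval hunks above follow the same pattern: run_eval moves to /v1/eval/run, and for jobs the job_id becomes a path parameter with a client-side non-empty check while task_id stays in the query string; only cancel keeps a POST body, now at /v1/eval/jobs/cancel. A sketch with placeholder ids, assuming a reachable server:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

status = client.eval.jobs.status("job-123", task_id="my-task")    # GET /v1/eval/jobs/job-123?task_id=my-task
result = client.eval.jobs.retrieve("job-123", task_id="my-task")  # GET /v1/eval/jobs/job-123/result
client.eval.jobs.cancel(job_id="job-123", task_id="my-task")      # POST /v1/eval/jobs/cancel (JSON body)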
@@ -232,6 +230,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not eval_task_id: + raise ValueError(f"Expected a non-empty value for `eval_task_id` but received {eval_task_id!r}") extra_headers = { **strip_not_given( { @@ -242,13 +242,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/eval-tasks/get", + f"/v1/eval-tasks/{eval_task_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"name": name}, eval_task_retrieve_params.EvalTaskRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvalTask, ) @@ -264,7 +260,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvalTask: + ) -> EvalTaskListResponse: """ Args: extra_headers: Send extra headers @@ -275,7 +271,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -286,11 +281,11 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/eval-tasks/list", + "/v1/eval-tasks", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=EvalTask, + cast_to=EvalTaskListResponse, ) async def register( @@ -332,7 +327,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/eval-tasks/register", + "/v1/eval-tasks", body=await async_maybe_transform( { "dataset_id": dataset_id, diff --git a/src/llama_stack_client/resources/memory_banks.py b/src/llama_stack_client/resources/memory_banks.py index 4aa7c2ca..1b4a393a 100644 --- a/src/llama_stack_client/resources/memory_banks.py +++ b/src/llama_stack_client/resources/memory_banks.py @@ -6,11 +6,7 @@ import httpx -from ..types import ( - memory_bank_register_params, - memory_bank_retrieve_params, - memory_bank_unregister_params, -) +from ..types import memory_bank_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -55,8 +51,8 @@ def with_streaming_response(self) -> MemoryBanksResourceWithStreamingResponse: def retrieve( self, - *, memory_bank_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
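Eval tasks now retrieve by eval_task_id instead of a name= query parameter, and list() returns the new EvalTaskListResponse envelope rather than a JSONL stream typed as a bare EvalTask. Sketch with a placeholder id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

task = client.eval_tasks.retrieve("my-eval-task")  # GET /v1/eval-tasks/my-eval-task (was name=...)
tasks = client.eval_tasks.list()                   # GET /v1/eval-tasks -> EvalTaskListResponse
for t in tasks.data:                               # items now sit under a top-level `data` list
    print(t)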
@@ -76,6 +72,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not memory_bank_id: + raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") extra_headers = { **strip_not_given( { @@ -88,15 +86,9 @@ def retrieve( return cast( Optional[MemoryBankRetrieveResponse], self._get( - "/v1/memory-banks/get", + f"/v1/memory-banks/{memory_bank_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - {"memory_bank_id": memory_bank_id}, memory_bank_retrieve_params.MemoryBankRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=cast( Any, MemoryBankRetrieveResponse @@ -126,7 +118,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -136,17 +127,12 @@ def list( ), **(extra_headers or {}), } - return cast( - MemoryBankListResponse, - self._get( - "/v1/memory-banks/list", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankListResponse - ), # Union types cannot be passed in as arguments in the type system + return self._get( + "/v1/memory-banks", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), + cast_to=MemoryBankListResponse, ) def register( @@ -187,7 +173,7 @@ def register( return cast( MemoryBankRegisterResponse, self._post( - "/v1/memory-banks/register", + "/v1/memory-banks", body=maybe_transform( { "memory_bank_id": memory_bank_id, @@ -208,8 +194,8 @@ def register( def unregister( self, - *, memory_bank_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -229,6 +215,8 @@ def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not memory_bank_id: + raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -239,11 +227,8 @@ def unregister( ), **(extra_headers or {}), } - return self._post( - "/v1/memory-banks/unregister", - body=maybe_transform( - {"memory_bank_id": memory_bank_id}, memory_bank_unregister_params.MemoryBankUnregisterParams - ), + return self._delete( + f"/v1/memory-banks/{memory_bank_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -273,8 +258,8 @@ def with_streaming_response(self) -> AsyncMemoryBanksResourceWithStreamingRespon async def retrieve( self, - *, memory_bank_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -294,6 +279,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not memory_bank_id: + raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") extra_headers = { **strip_not_given( { @@ -306,15 +293,9 @@ async def retrieve( return cast( Optional[MemoryBankRetrieveResponse], await self._get( - "/v1/memory-banks/get", + f"/v1/memory-banks/{memory_bank_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"memory_bank_id": memory_bank_id}, memory_bank_retrieve_params.MemoryBankRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=cast( Any, MemoryBankRetrieveResponse @@ -344,7 +325,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -354,17 +334,12 @@ async def list( ), **(extra_headers or {}), } - return cast( - MemoryBankListResponse, - await self._get( - "/v1/memory-banks/list", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, MemoryBankListResponse - ), # Union types cannot be passed in as arguments in the type system + return await self._get( + "/v1/memory-banks", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), + cast_to=MemoryBankListResponse, ) async def register( @@ -405,7 +380,7 @@ async def register( return cast( MemoryBankRegisterResponse, await self._post( - "/v1/memory-banks/register", + "/v1/memory-banks", body=await async_maybe_transform( { "memory_bank_id": memory_bank_id, @@ -426,8 +401,8 @@ async def register( async def unregister( self, - *, memory_bank_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -447,6 +422,8 @@ async def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not memory_bank_id: + raise ValueError(f"Expected a non-empty value for `memory_bank_id` but received {memory_bank_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -457,11 +434,8 @@ async def unregister( ), **(extra_headers or {}), } - return await self._post( - "/v1/memory-banks/unregister", - body=await async_maybe_transform( - {"memory_bank_id": memory_bank_id}, memory_bank_unregister_params.MemoryBankUnregisterParams - ), + return await self._delete( + f"/v1/memory-banks/{memory_bank_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/models.py b/src/llama_stack_client/resources/models.py index 9fe03ccd..e75525e6 100644 --- a/src/llama_stack_client/resources/models.py +++ b/src/llama_stack_client/resources/models.py @@ -7,7 +7,7 @@ import httpx -from ..types import model_register_params, model_retrieve_params, model_unregister_params +from ..types import model_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -24,6 +24,7 @@ ) from ..types.model import Model from .._base_client import make_request_options +from ..types.model_list_response import ModelListResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -50,8 +51,8 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: def retrieve( self, + model_id: str, *, - identifier: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
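Memory banks get the identical treatment, and list() no longer needs the Union cast() dance because it returns MemoryBankListResponse directly. Placeholder bank id below:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

bank = client.memory_banks.retrieve("my-bank")  # GET /v1/memory-banks/my-bank
banks = client.memory_banks.list()              # GET /v1/memory-banks -> MemoryBankListResponse
client.memory_banks.unregister("my-bank")       # DELETE /v1/memory-banks/my-bank (was POST)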
@@ -71,6 +72,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not model_id: + raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}") extra_headers = { **strip_not_given( { @@ -81,13 +84,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/models/get", + f"/v1/models/{model_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"identifier": identifier}, model_retrieve_params.ModelRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Model, ) @@ -103,7 +102,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: + ) -> ModelListResponse: """ Args: extra_headers: Send extra headers @@ -114,7 +113,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -125,11 +123,11 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/models/list", + "/v1/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Model, + cast_to=ModelListResponse, ) def register( @@ -169,7 +167,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/models/register", + "/v1/models", body=maybe_transform( { "model_id": model_id, @@ -188,8 +186,8 @@ def register( def unregister( self, - *, model_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -209,6 +207,8 @@ def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not model_id: + raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -219,9 +219,8 @@ def unregister( ), **(extra_headers or {}), } - return self._post( - "/v1/models/unregister", - body=maybe_transform({"model_id": model_id}, model_unregister_params.ModelUnregisterParams), + return self._delete( + f"/v1/models/{model_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -251,8 +250,8 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: async def retrieve( self, + model_id: str, *, - identifier: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -272,6 +271,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not model_id: + raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}") extra_headers = { **strip_not_given( { @@ -282,15 +283,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/models/get", + f"/v1/models/{model_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"identifier": identifier}, model_retrieve_params.ModelRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Model, ) @@ -306,7 +301,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: + ) -> ModelListResponse: """ Args: extra_headers: Send extra headers @@ -317,7 +312,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -328,11 +322,11 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/models/list", + "/v1/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Model, + cast_to=ModelListResponse, ) async def register( @@ -372,7 +366,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/models/register", + "/v1/models", body=await async_maybe_transform( { "model_id": model_id, @@ -391,8 +385,8 @@ async def register( async def unregister( self, - *, model_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
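Models follow suit with one extra rename: retrieve now takes model_id as a positional path parameter where it previously took identifier= as a query parameter. Placeholder model id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

model = client.models.retrieve("my-model")  # was retrieve(identifier="my-model")
models = client.models.list()               # GET /v1/models -> ModelListResponse
client.models.unregister("my-model")        # DELETE /v1/models/my-model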
@@ -412,6 +406,8 @@ async def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not model_id: + raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -422,9 +418,8 @@ async def unregister( ), **(extra_headers or {}), } - return await self._post( - "/v1/models/unregister", - body=await async_maybe_transform({"model_id": model_id}, model_unregister_params.ModelUnregisterParams), + return await self._delete( + f"/v1/models/{model_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/post_training/job.py b/src/llama_stack_client/resources/post_training/job.py index 74b65c27..fe80a69e 100644 --- a/src/llama_stack_client/resources/post_training/job.py +++ b/src/llama_stack_client/resources/post_training/job.py @@ -22,7 +22,7 @@ ) from ..._base_client import make_request_options from ...types.post_training import job_cancel_params, job_status_params, job_artifacts_params -from ...types.post_training_job import PostTrainingJob +from ...types.post_training.job_list_response import JobListResponse from ...types.post_training.job_status_response import JobStatusResponse from ...types.post_training.job_artifacts_response import JobArtifactsResponse @@ -60,7 +60,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> PostTrainingJob: + ) -> JobListResponse: """ Args: extra_headers: Send extra headers @@ -71,7 +71,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -86,7 +85,7 @@ def list( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=PostTrainingJob, + cast_to=JobListResponse, ) def artifacts( @@ -251,7 +250,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> PostTrainingJob: + ) -> JobListResponse: """ Args: extra_headers: Send extra headers @@ -262,7 +261,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -277,7 +275,7 @@ async def list( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=PostTrainingJob, + cast_to=JobListResponse, ) async def artifacts( diff --git a/src/llama_stack_client/resources/scoring_functions.py b/src/llama_stack_client/resources/scoring_functions.py index 2b46e592..07f6f55f 100644 --- a/src/llama_stack_client/resources/scoring_functions.py +++ b/src/llama_stack_client/resources/scoring_functions.py @@ -6,7 +6,7 @@ import httpx -from ..types import scoring_function_register_params, scoring_function_retrieve_params +from ..types import scoring_function_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -24,6 +24,7 @@ from .._base_client import make_request_options from ..types.scoring_fn import 
ScoringFn from ..types.shared_params.return_type import ReturnType +from ..types.scoring_function_list_response import ScoringFunctionListResponse __all__ = ["ScoringFunctionsResource", "AsyncScoringFunctionsResource"] @@ -50,8 +51,8 @@ def with_streaming_response(self) -> ScoringFunctionsResourceWithStreamingRespon def retrieve( self, - *, scoring_fn_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -71,6 +72,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not scoring_fn_id: + raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}") extra_headers = { **strip_not_given( { @@ -81,15 +84,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/scoring-functions/get", + f"/v1/scoring-functions/{scoring_fn_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - {"scoring_fn_id": scoring_fn_id}, scoring_function_retrieve_params.ScoringFunctionRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ScoringFn, ) @@ -105,7 +102,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ScoringFn: + ) -> ScoringFunctionListResponse: """ Args: extra_headers: Send extra headers @@ -116,7 +113,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -127,11 +123,11 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/scoring-functions/list", + "/v1/scoring-functions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ScoringFn, + cast_to=ScoringFunctionListResponse, ) def register( @@ -173,7 +169,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/scoring-functions/register", + "/v1/scoring-functions", body=maybe_transform( { "description": description, @@ -214,8 +210,8 @@ def with_streaming_response(self) -> AsyncScoringFunctionsResourceWithStreamingR async def retrieve( self, - *, scoring_fn_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
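The post-training job hunks above change only the list() return type: same GET, now typed as the new JobListResponse model instead of PostTrainingJob, with the JSONL Accept header dropped. Assuming the resource is exposed as client.post_training.job, as the module layout suggests, usage looks like:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

jobs = client.post_training.job.list()  # JobListResponse; shape lives in the new
print(jobs)                             # types/post_training/job_list_response.py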
@@ -235,6 +231,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not scoring_fn_id: + raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}") extra_headers = { **strip_not_given( { @@ -245,15 +243,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/scoring-functions/get", + f"/v1/scoring-functions/{scoring_fn_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"scoring_fn_id": scoring_fn_id}, scoring_function_retrieve_params.ScoringFunctionRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ScoringFn, ) @@ -269,7 +261,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ScoringFn: + ) -> ScoringFunctionListResponse: """ Args: extra_headers: Send extra headers @@ -280,7 +272,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -291,11 +282,11 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/scoring-functions/list", + "/v1/scoring-functions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ScoringFn, + cast_to=ScoringFunctionListResponse, ) async def register( @@ -337,7 +328,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/scoring-functions/register", + "/v1/scoring-functions", body=await async_maybe_transform( { "description": description, diff --git a/src/llama_stack_client/resources/shields.py b/src/llama_stack_client/resources/shields.py index 07b85a8a..3c14a6f7 100644 --- a/src/llama_stack_client/resources/shields.py +++ b/src/llama_stack_client/resources/shields.py @@ -6,7 +6,7 @@ import httpx -from ..types import shield_register_params, shield_retrieve_params +from ..types import shield_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import ( maybe_transform, @@ -23,6 +23,7 @@ ) from .._base_client import make_request_options from ..types.shield import Shield +from ..types.shield_list_response import ShieldListResponse __all__ = ["ShieldsResource", "AsyncShieldsResource"] @@ -49,8 +50,8 @@ def with_streaming_response(self) -> ShieldsResourceWithStreamingResponse: def retrieve( self, - *, identifier: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
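Scoring functions mirror the models change: scoring_fn_id moves into the path with a non-empty check, and list() returns the new ScoringFunctionListResponse. Placeholder id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

fn = client.scoring_functions.retrieve("my-scoring-fn")  # GET /v1/scoring-functions/my-scoring-fn
fns = client.scoring_functions.list()                    # GET /v1/scoring-functions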
@@ -70,6 +71,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not identifier: + raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}") extra_headers = { **strip_not_given( { @@ -80,13 +83,9 @@ def retrieve( **(extra_headers or {}), } return self._get( - "/v1/shields/get", + f"/v1/shields/{identifier}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"identifier": identifier}, shield_retrieve_params.ShieldRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Shield, ) @@ -102,7 +101,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Shield: + ) -> ShieldListResponse: """ Args: extra_headers: Send extra headers @@ -113,7 +112,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -124,11 +122,11 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/shields/list", + "/v1/shields", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Shield, + cast_to=ShieldListResponse, ) def register( @@ -167,7 +165,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/shields/register", + "/v1/shields", body=maybe_transform( { "shield_id": shield_id, @@ -206,8 +204,8 @@ def with_streaming_response(self) -> AsyncShieldsResourceWithStreamingResponse: async def retrieve( self, - *, identifier: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -227,6 +225,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not identifier: + raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}") extra_headers = { **strip_not_given( { @@ -237,15 +237,9 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - "/v1/shields/get", + f"/v1/shields/{identifier}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"identifier": identifier}, shield_retrieve_params.ShieldRetrieveParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Shield, ) @@ -261,7 +255,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Shield: + ) -> ShieldListResponse: """ Args: extra_headers: Send extra headers @@ -272,7 +266,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -283,11 +276,11 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/shields/list", + "/v1/shields", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Shield, + cast_to=ShieldListResponse, ) async def register( @@ -326,7 +319,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/shields/register", + "/v1/shields", body=await async_maybe_transform( { "shield_id": shield_id, diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py index 0cb7b71f..8125d5c5 100644 --- a/src/llama_stack_client/resources/telemetry.py +++ b/src/llama_stack_client/resources/telemetry.py @@ -59,8 +59,8 @@ def get_span_tree( self, *, span_id: str, - max_depth: int | NotGiven = NOT_GIVEN, attributes_to_return: List[str] | NotGiven = NOT_GIVEN, + max_depth: int | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
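Shields keep their identifier argument name but move it into the path, and list() gains the ShieldListResponse envelope. Placeholder identifier:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

shield = client.shields.retrieve("my-shield")  # GET /v1/shields/my-shield
shields = client.shields.list()                # GET /v1/shields -> ShieldListResponse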
@@ -90,23 +90,17 @@ def get_span_tree( **(extra_headers or {}), } return self._post( - "/v1/telemetry/get-span-tree", + "/v1/telemetry/query-span-tree", body=maybe_transform( - {"attributes_to_return": attributes_to_return}, + { + "span_id": span_id, + "attributes_to_return": attributes_to_return, + "max_depth": max_depth, + }, telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, ), options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "span_id": span_id, - "max_depth": max_depth, - }, - telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=TelemetryGetSpanTreeResponse, ) @@ -342,8 +336,8 @@ async def get_span_tree( self, *, span_id: str, - max_depth: int | NotGiven = NOT_GIVEN, attributes_to_return: List[str] | NotGiven = NOT_GIVEN, + max_depth: int | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -373,23 +367,17 @@ async def get_span_tree( **(extra_headers or {}), } return await self._post( - "/v1/telemetry/get-span-tree", + "/v1/telemetry/query-span-tree", body=await async_maybe_transform( - {"attributes_to_return": attributes_to_return}, + { + "span_id": span_id, + "attributes_to_return": attributes_to_return, + "max_depth": max_depth, + }, telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, ), options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "span_id": span_id, - "max_depth": max_depth, - }, - telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=TelemetryGetSpanTreeResponse, ) diff --git a/src/llama_stack_client/resources/tool_runtime.py b/src/llama_stack_client/resources/tool_runtime.py index 401896eb..16aa4b69 100644 --- a/src/llama_stack_client/resources/tool_runtime.py +++ b/src/llama_stack_client/resources/tool_runtime.py @@ -102,8 +102,8 @@ def invoke_tool( def list_tools( self, *, - tool_group_id: str | NotGiven = NOT_GIVEN, mcp_endpoint: URL | NotGiven = NOT_GIVEN, + tool_group_id: str | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
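get_span_tree consolidates in the other direction: span_id and max_depth previously rode in the query string while attributes_to_return sat alone in the body; now all three are posted together to /v1/telemetry/query-span-tree. Placeholder span id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

tree = client.telemetry.get_span_tree(
    span_id="span-abc",                 # placeholder; now sent in the POST body
    attributes_to_return=["model_id"],  # optional
    max_depth=2,                        # optional; moved out of the query string
)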
@@ -133,18 +133,19 @@ def list_tools( ), **(extra_headers or {}), } - return self._post( + return self._get( "/v1/tool-runtime/list-tools", - body=maybe_transform( - {"mcp_endpoint": mcp_endpoint}, tool_runtime_list_tools_params.ToolRuntimeListToolsParams - ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( - {"tool_group_id": tool_group_id}, tool_runtime_list_tools_params.ToolRuntimeListToolsParams + { + "mcp_endpoint": mcp_endpoint, + "tool_group_id": tool_group_id, + }, + tool_runtime_list_tools_params.ToolRuntimeListToolsParams, ), ), cast_to=ToolDef, @@ -224,8 +225,8 @@ async def invoke_tool( async def list_tools( self, *, - tool_group_id: str | NotGiven = NOT_GIVEN, mcp_endpoint: URL | NotGiven = NOT_GIVEN, + tool_group_id: str | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -255,18 +256,19 @@ async def list_tools( ), **(extra_headers or {}), } - return await self._post( + return await self._get( "/v1/tool-runtime/list-tools", - body=await async_maybe_transform( - {"mcp_endpoint": mcp_endpoint}, tool_runtime_list_tools_params.ToolRuntimeListToolsParams - ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=await async_maybe_transform( - {"tool_group_id": tool_group_id}, tool_runtime_list_tools_params.ToolRuntimeListToolsParams + { + "mcp_endpoint": mcp_endpoint, + "tool_group_id": tool_group_id, + }, + tool_runtime_list_tools_params.ToolRuntimeListToolsParams, ), ), cast_to=ToolDef, diff --git a/src/llama_stack_client/resources/toolgroups.py b/src/llama_stack_client/resources/toolgroups.py index e2748f21..0adb5e7f 100644 --- a/src/llama_stack_client/resources/toolgroups.py +++ b/src/llama_stack_client/resources/toolgroups.py @@ -6,7 +6,7 @@ import httpx -from ..types import toolgroup_get_params, toolgroup_register_params, toolgroup_unregister_params +from ..types import toolgroup_register_params from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven from .._utils import ( maybe_transform, @@ -24,6 +24,7 @@ from .._base_client import make_request_options from ..types.tool_group import ToolGroup from ..types.shared_params.url import URL +from ..types.toolgroup_list_response import ToolgroupListResponse __all__ = ["ToolgroupsResource", "AsyncToolgroupsResource"] @@ -59,7 +60,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolGroup: + ) -> ToolgroupListResponse: """ List tool groups with optional provider @@ -72,7 +73,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -83,17 +83,17 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/toolgroups/list", + "/v1/toolgroups", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ToolGroup, + cast_to=ToolgroupListResponse, ) def get( self, - *, toolgroup_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use 
the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -113,6 +113,8 @@ def get( timeout: Override the client-level default timeout for this request, in seconds """ + if not toolgroup_id: + raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}") extra_headers = { **strip_not_given( { @@ -123,13 +125,9 @@ def get( **(extra_headers or {}), } return self._get( - "/v1/toolgroups/get", + f"/v1/toolgroups/{toolgroup_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"toolgroup_id": toolgroup_id}, toolgroup_get_params.ToolgroupGetParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ToolGroup, ) @@ -173,7 +171,7 @@ def register( **(extra_headers or {}), } return self._post( - "/v1/toolgroups/register", + "/v1/toolgroups", body=maybe_transform( { "provider_id": provider_id, @@ -191,8 +189,8 @@ def register( def unregister( self, + toolgroup_id: str, *, - tool_group_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -214,6 +212,8 @@ def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not toolgroup_id: + raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -224,11 +224,8 @@ def unregister( ), **(extra_headers or {}), } - return self._post( - "/v1/toolgroups/unregister", - body=maybe_transform( - {"tool_group_id": tool_group_id}, toolgroup_unregister_params.ToolgroupUnregisterParams - ), + return self._delete( + f"/v1/toolgroups/{toolgroup_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -267,7 +264,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ToolGroup: + ) -> ToolgroupListResponse: """ List tool groups with optional provider @@ -280,7 +277,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -291,17 +287,17 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/toolgroups/list", + "/v1/toolgroups", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ToolGroup, + cast_to=ToolgroupListResponse, ) async def get( self, - *, toolgroup_id: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
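Back in tool_runtime above, list_tools flips from POST (mcp_endpoint in the body, tool_group_id in the query) to a plain GET carrying both as query parameters; note this resource keeps the tool_group_id spelling. Placeholder group id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

tools = client.tool_runtime.list_tools(tool_group_id="my-toolgroup")  # GET /v1/tool-runtime/list-tools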
@@ -321,6 +317,8 @@ async def get( timeout: Override the client-level default timeout for this request, in seconds """ + if not toolgroup_id: + raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}") extra_headers = { **strip_not_given( { @@ -331,15 +329,9 @@ async def get( **(extra_headers or {}), } return await self._get( - "/v1/toolgroups/get", + f"/v1/toolgroups/{toolgroup_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"toolgroup_id": toolgroup_id}, toolgroup_get_params.ToolgroupGetParams - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ToolGroup, ) @@ -383,7 +375,7 @@ async def register( **(extra_headers or {}), } return await self._post( - "/v1/toolgroups/register", + "/v1/toolgroups", body=await async_maybe_transform( { "provider_id": provider_id, @@ -401,8 +393,8 @@ async def register( async def unregister( self, + toolgroup_id: str, *, - tool_group_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -424,6 +416,8 @@ async def unregister( timeout: Override the client-level default timeout for this request, in seconds """ + if not toolgroup_id: + raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -434,11 +428,8 @@ async def unregister( ), **(extra_headers or {}), } - return await self._post( - "/v1/toolgroups/unregister", - body=await async_maybe_transform( - {"tool_group_id": tool_group_id}, toolgroup_unregister_params.ToolgroupUnregisterParams - ), + return await self._delete( + f"/v1/toolgroups/{toolgroup_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py index cb7d434e..6ca15896 100644 --- a/src/llama_stack_client/resources/tools.py +++ b/src/llama_stack_client/resources/tools.py @@ -4,7 +4,7 @@ import httpx -from ..types import tool_get_params, tool_list_params +from ..types import tool_list_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import ( maybe_transform, @@ -21,6 +21,7 @@ ) from ..types.tool import Tool from .._base_client import make_request_options +from ..types.tool_list_response import ToolListResponse __all__ = ["ToolsResource", "AsyncToolsResource"] @@ -48,7 +49,7 @@ def with_streaming_response(self) -> ToolsResourceWithStreamingResponse: def list( self, *, - tool_group_id: str | NotGiven = NOT_GIVEN, + toolgroup_id: str | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
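Toolgroups get the path-parameter treatment plus a rename: unregister's argument changes from tool_group_id to toolgroup_id and becomes positional, so keyword callers must update. Placeholder id:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

group = client.toolgroups.get("my-toolgroup")  # GET /v1/toolgroups/my-toolgroup
client.toolgroups.unregister("my-toolgroup")   # DELETE; was unregister(tool_group_id=...)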
@@ -57,7 +58,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Tool: + ) -> ToolListResponse: """ List tools with optional tool group @@ -70,7 +71,6 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -81,21 +81,21 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/tools/list", + "/v1/tools", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"tool_group_id": tool_group_id}, tool_list_params.ToolListParams), + query=maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams), ), - cast_to=Tool, + cast_to=ToolListResponse, ) def get( self, - *, tool_name: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -115,6 +115,8 @@ def get( timeout: Override the client-level default timeout for this request, in seconds """ + if not tool_name: + raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}") extra_headers = { **strip_not_given( { @@ -125,13 +127,9 @@ def get( **(extra_headers or {}), } return self._get( - "/v1/tools/get", + f"/v1/tools/{tool_name}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"tool_name": tool_name}, tool_get_params.ToolGetParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Tool, ) @@ -160,7 +158,7 @@ def with_streaming_response(self) -> AsyncToolsResourceWithStreamingResponse: async def list( self, *, - tool_group_id: str | NotGiven = NOT_GIVEN, + toolgroup_id: str | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
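The tools resource picks up the same rename on its filter parameter (tool_group_id becomes toolgroup_id) and moves tool_name into the path for get. Placeholder names:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

tools = client.tools.list(toolgroup_id="my-toolgroup")  # was list(tool_group_id=...)
tool = client.tools.get("my-tool")                      # GET /v1/tools/my-tool (was ?tool_name=...)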
@@ -169,7 +167,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Tool: + ) -> ToolListResponse: """ List tools with optional tool group @@ -182,7 +180,6 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -193,21 +190,21 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/tools/list", + "/v1/tools", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=await async_maybe_transform({"tool_group_id": tool_group_id}, tool_list_params.ToolListParams), + query=await async_maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams), ), - cast_to=Tool, + cast_to=ToolListResponse, ) async def get( self, - *, tool_name: str, + *, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -227,6 +224,8 @@ async def get( timeout: Override the client-level default timeout for this request, in seconds """ + if not tool_name: + raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}") extra_headers = { **strip_not_given( { @@ -237,13 +236,9 @@ async def get( **(extra_headers or {}), } return await self._get( - "/v1/tools/get", + f"/v1/tools/{tool_name}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"tool_name": tool_name}, tool_get_params.ToolGetParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Tool, ) diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py index cb9cf18d..705e3b39 100644 --- a/src/llama_stack_client/types/__init__.py +++ b/src/llama_stack_client/types/__init__.py @@ -37,45 +37,41 @@ from .inference_step import InferenceStep as InferenceStep from .tool_def_param import ToolDefParam as ToolDefParam from .token_log_probs import TokenLogProbs as TokenLogProbs -from .tool_get_params import ToolGetParams as ToolGetParams from .shield_call_step import ShieldCallStep as ShieldCallStep from .span_with_status import SpanWithStatus as SpanWithStatus from .tool_list_params import ToolListParams as ToolListParams from .evaluate_response import EvaluateResponse as EvaluateResponse from .post_training_job import PostTrainingJob as PostTrainingJob +from .tool_list_response import ToolListResponse as ToolListResponse from .agent_create_params import AgentCreateParams as AgentCreateParams -from .agent_delete_params import AgentDeleteParams as AgentDeleteParams from .completion_response import CompletionResponse as CompletionResponse from .embeddings_response import EmbeddingsResponse as EmbeddingsResponse from .memory_query_params import MemoryQueryParams as MemoryQueryParams +from .model_list_response import ModelListResponse as ModelListResponse from .route_list_response import RouteListResponse as RouteListResponse from .run_shield_response import RunShieldResponse as RunShieldResponse from .tool_execution_step import ToolExecutionStep as ToolExecutionStep from 
.eval_run_eval_params import EvalRunEvalParams as EvalRunEvalParams from .memory_insert_params import MemoryInsertParams as MemoryInsertParams from .scoring_score_params import ScoringScoreParams as ScoringScoreParams -from .toolgroup_get_params import ToolgroupGetParams as ToolgroupGetParams +from .shield_list_response import ShieldListResponse as ShieldListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .dataset_list_response import DatasetListResponse as DatasetListResponse from .memory_retrieval_step import MemoryRetrievalStep as MemoryRetrievalStep from .model_register_params import ModelRegisterParams as ModelRegisterParams -from .model_retrieve_params import ModelRetrieveParams as ModelRetrieveParams from .paginated_rows_result import PaginatedRowsResult as PaginatedRowsResult from .provider_list_response import ProviderListResponse as ProviderListResponse from .scoring_score_response import ScoringScoreResponse as ScoringScoreResponse from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams -from .shield_retrieve_params import ShieldRetrieveParams as ShieldRetrieveParams from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams -from .dataset_retrieve_params import DatasetRetrieveParams as DatasetRetrieveParams -from .model_unregister_params import ModelUnregisterParams as ModelUnregisterParams +from .eval_task_list_response import EvalTaskListResponse as EvalTaskListResponse +from .toolgroup_list_response import ToolgroupListResponse as ToolgroupListResponse from .query_documents_response import QueryDocumentsResponse as QueryDocumentsResponse from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse -from .dataset_unregister_params import DatasetUnregisterParams as DatasetUnregisterParams from .eval_evaluate_rows_params import EvalEvaluateRowsParams as EvalEvaluateRowsParams from .eval_task_register_params import EvalTaskRegisterParams as EvalTaskRegisterParams -from .eval_task_retrieve_params import EvalTaskRetrieveParams as EvalTaskRetrieveParams from .memory_bank_list_response import MemoryBankListResponse as MemoryBankListResponse from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams @@ -83,23 +79,20 @@ from .inference_completion_params import InferenceCompletionParams as InferenceCompletionParams from .inference_embeddings_params import InferenceEmbeddingsParams as InferenceEmbeddingsParams from .memory_bank_register_params import MemoryBankRegisterParams as MemoryBankRegisterParams -from .memory_bank_retrieve_params import MemoryBankRetrieveParams as MemoryBankRetrieveParams -from .toolgroup_unregister_params import ToolgroupUnregisterParams as ToolgroupUnregisterParams from .datasetio_append_rows_params import DatasetioAppendRowsParams as DatasetioAppendRowsParams from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse from .telemetry_query_spans_params import TelemetryQuerySpansParams as TelemetryQuerySpansParams from .inference_completion_response import InferenceCompletionResponse as InferenceCompletionResponse from .memory_bank_register_response import MemoryBankRegisterResponse as MemoryBankRegisterResponse from 
.memory_bank_retrieve_response import MemoryBankRetrieveResponse as MemoryBankRetrieveResponse -from .memory_bank_unregister_params import MemoryBankUnregisterParams as MemoryBankUnregisterParams from .telemetry_query_traces_params import TelemetryQueryTracesParams as TelemetryQueryTracesParams +from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams from .inference_chat_completion_params import InferenceChatCompletionParams as InferenceChatCompletionParams from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams -from .scoring_function_retrieve_params import ScoringFunctionRetrieveParams as ScoringFunctionRetrieveParams from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse from .batch_inference_completion_params import BatchInferenceCompletionParams as BatchInferenceCompletionParams from .inference_chat_completion_response import InferenceChatCompletionResponse as InferenceChatCompletionResponse diff --git a/src/llama_stack_client/types/agents/__init__.py b/src/llama_stack_client/types/agents/__init__.py index 013b9e88..4c70d9a6 100644 --- a/src/llama_stack_client/types/agents/__init__.py +++ b/src/llama_stack_client/types/agents/__init__.py @@ -5,11 +5,8 @@ from .turn import Turn as Turn from .session import Session as Session from .turn_create_params import TurnCreateParams as TurnCreateParams -from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams from .turn_create_response import TurnCreateResponse as TurnCreateResponse -from .turn_retrieve_params import TurnRetrieveParams as TurnRetrieveParams from .session_create_params import SessionCreateParams as SessionCreateParams -from .session_delete_params import SessionDeleteParams as SessionDeleteParams from .step_retrieve_response import StepRetrieveResponse as StepRetrieveResponse from .session_create_response import SessionCreateResponse as SessionCreateResponse from .session_retrieve_params import SessionRetrieveParams as SessionRetrieveParams diff --git a/src/llama_stack_client/types/agents/session_create_params.py b/src/llama_stack_client/types/agents/session_create_params.py index 42b92f86..dc8762bc 100644 --- a/src/llama_stack_client/types/agents/session_create_params.py +++ b/src/llama_stack_client/types/agents/session_create_params.py @@ -10,8 +10,6 @@ class SessionCreateParams(TypedDict, total=False): - agent_id: Required[str] - session_name: Required[str] x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] diff --git a/src/llama_stack_client/types/agents/session_retrieve_params.py b/src/llama_stack_client/types/agents/session_retrieve_params.py index 7a4c3db9..df9cecf9 100644 --- a/src/llama_stack_client/types/agents/session_retrieve_params.py +++ b/src/llama_stack_client/types/agents/session_retrieve_params.py @@ -13,8 +13,6 @@ class SessionRetrieveParams(TypedDict, total=False): agent_id: Required[str] - session_id: Required[str] - turn_ids: List[str] x_llama_stack_client_version: Annotated[str, 
PropertyInfo(alias="X-LlamaStack-Client-Version")] diff --git a/src/llama_stack_client/types/agents/turn_create_params.py b/src/llama_stack_client/types/agents/turn_create_params.py index 2d9cf7e2..e8058207 100644 --- a/src/llama_stack_client/types/agents/turn_create_params.py +++ b/src/llama_stack_client/types/agents/turn_create_params.py @@ -30,8 +30,6 @@ class TurnCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[Message]] - session_id: Required[str] - documents: Iterable[Document] toolgroups: List[Toolgroup] diff --git a/src/llama_stack_client/types/dataset_list_response.py b/src/llama_stack_client/types/dataset_list_response.py index 2306f8ed..a0d59234 100644 --- a/src/llama_stack_client/types/dataset_list_response.py +++ b/src/llama_stack_client/types/dataset_list_response.py @@ -7,10 +7,10 @@ from .shared.url import URL from .shared.param_type import ParamType -__all__ = ["DatasetListResponse"] +__all__ = ["DatasetListResponse", "Data"] -class DatasetListResponse(BaseModel): +class Data(BaseModel): dataset_schema: Dict[str, ParamType] identifier: str @@ -24,3 +24,7 @@ class DatasetListResponse(BaseModel): type: Literal["dataset"] url: URL + + +class DatasetListResponse(BaseModel): + data: List[Data] diff --git a/src/llama_stack_client/types/eval/job_retrieve_params.py b/src/llama_stack_client/types/eval/job_retrieve_params.py index 1237c933..2278e423 100644 --- a/src/llama_stack_client/types/eval/job_retrieve_params.py +++ b/src/llama_stack_client/types/eval/job_retrieve_params.py @@ -10,8 +10,6 @@ class JobRetrieveParams(TypedDict, total=False): - job_id: Required[str] - task_id: Required[str] x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] diff --git a/src/llama_stack_client/types/eval/job_status_params.py b/src/llama_stack_client/types/eval/job_status_params.py index 2f8360cc..7dc72242 100644 --- a/src/llama_stack_client/types/eval/job_status_params.py +++ b/src/llama_stack_client/types/eval/job_status_params.py @@ -10,8 +10,6 @@ class JobStatusParams(TypedDict, total=False): - job_id: Required[str] - task_id: Required[str] x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] diff --git a/src/llama_stack_client/types/eval_task_list_response.py b/src/llama_stack_client/types/eval_task_list_response.py new file mode 100644 index 00000000..8f6db11d --- /dev/null +++ b/src/llama_stack_client/types/eval_task_list_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from .._models import BaseModel +from .eval_task import EvalTask + +__all__ = ["EvalTaskListResponse"] + + +class EvalTaskListResponse(BaseModel): + data: List[EvalTask] diff --git a/src/llama_stack_client/types/memory_bank_list_response.py b/src/llama_stack_client/types/memory_bank_list_response.py index ed7c1bbf..9ed4aa25 100644 --- a/src/llama_stack_client/types/memory_bank_list_response.py +++ b/src/llama_stack_client/types/memory_bank_list_response.py @@ -1,14 +1,21 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
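The dataset and eval-task list responses above move from a bare item (or union) to an envelope model whose `data` field carries the items; the memory-bank union below gets the same treatment. A minimal usage sketch, assuming a server at the placeholder URL below and that the `list()` resource methods return these response models unwrapped:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

    # List responses are now envelope models; iterate the `data` field.
    datasets = client.datasets.list()
    for dataset in datasets.data:
        print(dataset.identifier, dataset.type)  # `type` is always "dataset"

    eval_tasks = client.eval_tasks.list()
    print([task.identifier for task in eval_tasks.data])

The envelope keeps list payloads extensible (e.g., room for extra top-level fields later) without changing the item models themselves.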
-from typing import Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, TypeAlias from .._models import BaseModel -__all__ = ["MemoryBankListResponse", "VectorMemoryBank", "KeyValueMemoryBank", "KeywordMemoryBank", "GraphMemoryBank"] +__all__ = [ + "MemoryBankListResponse", + "Data", + "DataVectorMemoryBank", + "DataKeyValueMemoryBank", + "DataKeywordMemoryBank", + "DataGraphMemoryBank", +] -class VectorMemoryBank(BaseModel): +class DataVectorMemoryBank(BaseModel): chunk_size_in_tokens: int embedding_model: str @@ -28,7 +35,7 @@ class VectorMemoryBank(BaseModel): overlap_size_in_tokens: Optional[int] = None -class KeyValueMemoryBank(BaseModel): +class DataKeyValueMemoryBank(BaseModel): identifier: str memory_bank_type: Literal["keyvalue"] @@ -40,7 +47,7 @@ class KeyValueMemoryBank(BaseModel): type: Literal["memory_bank"] -class KeywordMemoryBank(BaseModel): +class DataKeywordMemoryBank(BaseModel): identifier: str memory_bank_type: Literal["keyword"] @@ -52,7 +59,7 @@ class KeywordMemoryBank(BaseModel): type: Literal["memory_bank"] -class GraphMemoryBank(BaseModel): +class DataGraphMemoryBank(BaseModel): identifier: str memory_bank_type: Literal["graph"] @@ -64,4 +71,8 @@ class GraphMemoryBank(BaseModel): type: Literal["memory_bank"] -MemoryBankListResponse: TypeAlias = Union[VectorMemoryBank, KeyValueMemoryBank, KeywordMemoryBank, GraphMemoryBank] +Data: TypeAlias = Union[DataVectorMemoryBank, DataKeyValueMemoryBank, DataKeywordMemoryBank, DataGraphMemoryBank] + + +class MemoryBankListResponse(BaseModel): + data: List[Data] diff --git a/src/llama_stack_client/types/model_list_response.py b/src/llama_stack_client/types/model_list_response.py index 6fb1a940..0119042c 100644 --- a/src/llama_stack_client/types/model_list_response.py +++ b/src/llama_stack_client/types/model_list_response.py @@ -1,23 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List, Union +from typing import List +from .model import Model from .._models import BaseModel -__all__ = ["ModelListResponse", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - llama_model: object - """ - The model family and SKU of the model along with other parameters corresponding - to the model. - """ - - provider_config: ProviderConfig + data: List[Model] diff --git a/src/llama_stack_client/types/post_training/__init__.py b/src/llama_stack_client/types/post_training/__init__.py index 67f6eefe..d5472d43 100644 --- a/src/llama_stack_client/types/post_training/__init__.py +++ b/src/llama_stack_client/types/post_training/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations from .job_cancel_params import JobCancelParams as JobCancelParams +from .job_list_response import JobListResponse as JobListResponse from .job_status_params import JobStatusParams as JobStatusParams from .job_status_response import JobStatusResponse as JobStatusResponse from .job_artifacts_params import JobArtifactsParams as JobArtifactsParams diff --git a/src/llama_stack_client/types/post_training/job_list_response.py b/src/llama_stack_client/types/post_training/job_list_response.py new file mode 100644 index 00000000..7700ec9f --- /dev/null +++ b/src/llama_stack_client/types/post_training/job_list_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel + +__all__ = ["JobListResponse", "Data"] + + +class Data(BaseModel): + job_uuid: str + + +class JobListResponse(BaseModel): + data: List[Data] diff --git a/src/llama_stack_client/types/provider_list_response.py b/src/llama_stack_client/types/provider_list_response.py index 29005a86..e02b24a2 100644 --- a/src/llama_stack_client/types/provider_list_response.py +++ b/src/llama_stack_client/types/provider_list_response.py @@ -1,10 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict -from typing_extensions import TypeAlias +from typing import List +from .._models import BaseModel from .provider_info import ProviderInfo __all__ = ["ProviderListResponse"] -ProviderListResponse: TypeAlias = Dict[str, ProviderInfo] + +class ProviderListResponse(BaseModel): + data: List[ProviderInfo] diff --git a/src/llama_stack_client/types/scoring_function_list_response.py b/src/llama_stack_client/types/scoring_function_list_response.py index c1dd9d4f..470f5006 100644 --- a/src/llama_stack_client/types/scoring_function_list_response.py +++ b/src/llama_stack_client/types/scoring_function_list_response.py @@ -1,84 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias +from typing import List from .._models import BaseModel +from .scoring_fn import ScoringFn -__all__ = [ - "ScoringFunctionListResponse", - "Parameter", - "ParameterType", - "ParameterTypeType", - "ReturnType", - "ReturnTypeType", - "Context", -] - - -class ParameterTypeType(BaseModel): - type: Literal["string"] - - -ParameterType: TypeAlias = Union[ - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, - ParameterTypeType, -] - - -class Parameter(BaseModel): - name: str - - type: ParameterType - - description: Optional[str] = None - - -class ReturnTypeType(BaseModel): - type: Literal["string"] - - -ReturnType: TypeAlias = Union[ - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, - ReturnTypeType, -] - - -class Context(BaseModel): - judge_model: str - - judge_score_regex: Optional[List[str]] = None - - prompt_template: Optional[str] = None +__all__ = ["ScoringFunctionListResponse"] class ScoringFunctionListResponse(BaseModel): - identifier: str - - metadata: Dict[str, Union[bool, float, str, List[object], object, None]] - - parameters: List[Parameter] - - provider_id: str - - return_type: ReturnType - - context: Optional[Context] = None - - description: Optional[str] = None + data: List[ScoringFn] diff --git a/src/llama_stack_client/types/shared/sampling_params.py b/src/llama_stack_client/types/shared/sampling_params.py index 276de1d2..48fbe544 100644 --- a/src/llama_stack_client/types/shared/sampling_params.py +++ b/src/llama_stack_client/types/shared/sampling_params.py @@ -1,22 +1,43 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
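post_training.job.list() now returns the typed JobListResponse above instead of an application/jsonl stream, which is also why the Prism skip markers disappear from tests/api_resources/post_training/test_job.py further down. A short sketch, assuming the placeholder base URL below and that `status()` keeps its `job_uuid` keyword:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

    jobs = client.post_training.job.list()
    for job in jobs.data:
        # Each entry is a small model carrying only the job's UUID.
        status = client.post_training.job.status(job_uuid=job.job_uuid)
        print(job.job_uuid, status)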
-from typing import Optional -from typing_extensions import Literal +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias from ..._models import BaseModel -__all__ = ["SamplingParams"] +__all__ = [ + "SamplingParams", + "Strategy", + "StrategyGreedySamplingStrategy", + "StrategyTopPSamplingStrategy", + "StrategyTopKSamplingStrategy", +] -class SamplingParams(BaseModel): - strategy: Literal["greedy", "top_p", "top_k"] +class StrategyGreedySamplingStrategy(BaseModel): + type: Literal["greedy"] - max_tokens: Optional[int] = None - repetition_penalty: Optional[float] = None +class StrategyTopPSamplingStrategy(BaseModel): + type: Literal["top_p"] temperature: Optional[float] = None - top_k: Optional[int] = None - top_p: Optional[float] = None + + +class StrategyTopKSamplingStrategy(BaseModel): + top_k: int + + type: Literal["top_k"] + + +Strategy: TypeAlias = Union[StrategyGreedySamplingStrategy, StrategyTopPSamplingStrategy, StrategyTopKSamplingStrategy] + + +class SamplingParams(BaseModel): + strategy: Strategy + + max_tokens: Optional[int] = None + + repetition_penalty: Optional[float] = None diff --git a/src/llama_stack_client/types/shared_params/sampling_params.py b/src/llama_stack_client/types/shared_params/sampling_params.py index 3890df01..1d9bcaf5 100644 --- a/src/llama_stack_client/types/shared_params/sampling_params.py +++ b/src/llama_stack_client/types/shared_params/sampling_params.py @@ -2,20 +2,42 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["SamplingParams"] +__all__ = [ + "SamplingParams", + "Strategy", + "StrategyGreedySamplingStrategy", + "StrategyTopPSamplingStrategy", + "StrategyTopKSamplingStrategy", +] -class SamplingParams(TypedDict, total=False): - strategy: Required[Literal["greedy", "top_p", "top_k"]] +class StrategyGreedySamplingStrategy(TypedDict, total=False): + type: Required[Literal["greedy"]] - max_tokens: int - repetition_penalty: float +class StrategyTopPSamplingStrategy(TypedDict, total=False): + type: Required[Literal["top_p"]] temperature: float - top_k: int - top_p: float + + +class StrategyTopKSamplingStrategy(TypedDict, total=False): + top_k: Required[int] + + type: Required[Literal["top_k"]] + + +Strategy: TypeAlias = Union[StrategyGreedySamplingStrategy, StrategyTopPSamplingStrategy, StrategyTopKSamplingStrategy] + + +class SamplingParams(TypedDict, total=False): + strategy: Required[Strategy] + + max_tokens: int + + repetition_penalty: float diff --git a/src/llama_stack_client/types/shield_list_response.py b/src/llama_stack_client/types/shield_list_response.py index 9f375cbf..0dd45e40 100644 --- a/src/llama_stack_client/types/shield_list_response.py +++ b/src/llama_stack_client/types/shield_list_response.py @@ -1,19 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
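SamplingParams is reshaped in both the response model and the request TypedDict above: the flat `strategy` literal plus loose `temperature`/`top_k`/`top_p` fields become a tagged union discriminated on `type`. A before/after sketch of the request shape (plain dicts mirroring the TypedDicts; the numeric values are illustrative only):

    # Old shape (no longer accepted): bare literal plus top-level tuning fields.
    old_params = {
        "strategy": "top_p",
        "temperature": 0.7,
        "top_p": 0.9,
        "max_tokens": 512,
    }

    # New shape: `strategy` is an object tagged by `type`.
    greedy = {"strategy": {"type": "greedy"}, "max_tokens": 512}
    top_p = {"strategy": {"type": "top_p", "temperature": 0.7}, "max_tokens": 512}
    top_k = {"strategy": {"type": "top_k", "top_k": 40}, "repetition_penalty": 1.1}

The agents tests later in this patch update accordingly, passing "sampling_params": {"strategy": {"type": "greedy"}, ...}.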
-from typing import Dict, List, Union +from typing import List +from .shield import Shield from .._models import BaseModel -__all__ = ["ShieldListResponse", "ProviderConfig"] - - -class ProviderConfig(BaseModel): - config: Dict[str, Union[bool, float, str, List[object], object, None]] - - provider_type: str +__all__ = ["ShieldListResponse"] class ShieldListResponse(BaseModel): - provider_config: ProviderConfig - - shield_type: str + data: List[Shield] diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_params.py b/src/llama_stack_client/types/telemetry_get_span_tree_params.py index 1ded8f7c..310cf7ef 100644 --- a/src/llama_stack_client/types/telemetry_get_span_tree_params.py +++ b/src/llama_stack_client/types/telemetry_get_span_tree_params.py @@ -13,10 +13,10 @@ class TelemetryGetSpanTreeParams(TypedDict, total=False): span_id: Required[str] - max_depth: int - attributes_to_return: List[str] + max_depth: int + x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/tool_list_params.py b/src/llama_stack_client/types/tool_list_params.py index 03aacb80..bfebd79d 100644 --- a/src/llama_stack_client/types/tool_list_params.py +++ b/src/llama_stack_client/types/tool_list_params.py @@ -10,7 +10,7 @@ class ToolListParams(TypedDict, total=False): - tool_group_id: str + toolgroup_id: str x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] diff --git a/src/llama_stack_client/types/tool_list_response.py b/src/llama_stack_client/types/tool_list_response.py new file mode 100644 index 00000000..0aa622db --- /dev/null +++ b/src/llama_stack_client/types/tool_list_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from .tool import Tool +from .._models import BaseModel + +__all__ = ["ToolListResponse"] + + +class ToolListResponse(BaseModel): + data: List[Tool] diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_params.py b/src/llama_stack_client/types/tool_runtime_list_tools_params.py index fb273dd8..c7c96791 100644 --- a/src/llama_stack_client/types/tool_runtime_list_tools_params.py +++ b/src/llama_stack_client/types/tool_runtime_list_tools_params.py @@ -11,10 +11,10 @@ class ToolRuntimeListToolsParams(TypedDict, total=False): - tool_group_id: str - mcp_endpoint: URL + tool_group_id: str + x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/toolgroup_list_response.py b/src/llama_stack_client/types/toolgroup_list_response.py new file mode 100644 index 00000000..8d297d80 --- /dev/null +++ b/src/llama_stack_client/types/toolgroup_list_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
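Together with the tools resource changes earlier in the patch, tool listing now filters via `toolgroup_id` (renamed from `tool_group_id`) against GET /v1/tools, returns a `data` envelope, and tool retrieval becomes path-based with a client-side guard against empty names. A sketch, where the base URL, group id, and tool name are placeholders and `Tool` is assumed to expose `identifier`:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5000")  # placeholder URL

    tools = client.tools.list(toolgroup_id="builtin::memory")  # placeholder group id
    for tool in tools.data:
        print(tool.identifier)

    # GET /v1/tools/{tool_name}; an empty tool_name now raises ValueError locally.
    tool = client.tools.get(tool_name="memory_query")  # placeholder tool name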
+ +from typing import List + +from .._models import BaseModel +from .tool_group import ToolGroup + +__all__ = ["ToolgroupListResponse"] + + +class ToolgroupListResponse(BaseModel): + data: List[ToolGroup] diff --git a/tests/api_resources/agents/test_session.py b/tests/api_resources/agents/test_session.py index 4723deaf..7ee27386 100644 --- a/tests/api_resources/agents/test_session.py +++ b/tests/api_resources/agents/test_session.py @@ -64,19 +64,27 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_create(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.session.with_raw_response.create( + agent_id="", + session_name="session_name", + ) + @parametrize def test_method_retrieve(self, client: LlamaStackClient) -> None: session = client.agents.session.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert_matches_type(Session, session, path=["response"]) @parametrize def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None: session = client.agents.session.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", turn_ids=["string"], x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", @@ -86,8 +94,8 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None @parametrize def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: response = client.agents.session.with_raw_response.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert response.is_closed is True @@ -98,8 +106,8 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: with client.agents.session.with_streaming_response.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -109,19 +117,33 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.session.with_raw_response.retrieve( + session_id="session_id", + agent_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.session.with_raw_response.retrieve( + session_id="", + agent_id="agent_id", + ) + @parametrize def test_method_delete(self, client: LlamaStackClient) -> None: session = client.agents.session.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert session is None @parametrize def test_method_delete_with_all_params(self, client: LlamaStackClient) -> None: session = client.agents.session.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -130,8 +152,8 @@ def test_method_delete_with_all_params(self, client: LlamaStackClient) -> None: @parametrize def 
test_raw_response_delete(self, client: LlamaStackClient) -> None: response = client.agents.session.with_raw_response.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert response.is_closed is True @@ -142,8 +164,8 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_delete(self, client: LlamaStackClient) -> None: with client.agents.session.with_streaming_response.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -153,6 +175,20 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_delete(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.session.with_raw_response.delete( + session_id="session_id", + agent_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.session.with_raw_response.delete( + session_id="", + agent_id="agent_id", + ) + class TestAsyncSession: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -201,19 +237,27 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie assert cast(Any, response.is_closed) is True + @parametrize + async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.session.with_raw_response.create( + agent_id="", + session_name="session_name", + ) + @parametrize async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: session = await async_client.agents.session.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert_matches_type(Session, session, path=["response"]) @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: session = await async_client.agents.session.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", turn_ids=["string"], x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", @@ -223,8 +267,8 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta @parametrize async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.session.with_raw_response.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert response.is_closed is True @@ -235,8 +279,8 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.session.with_streaming_response.retrieve( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -246,19 +290,33 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl assert cast(Any, response.is_closed) is True + 
@parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.session.with_raw_response.retrieve( + session_id="session_id", + agent_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.session.with_raw_response.retrieve( + session_id="", + agent_id="agent_id", + ) + @parametrize async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None: session = await async_client.agents.session.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert session is None @parametrize async def test_method_delete_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: session = await async_client.agents.session.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -267,8 +325,8 @@ async def test_method_delete_with_all_params(self, async_client: AsyncLlamaStack @parametrize async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.session.with_raw_response.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) assert response.is_closed is True @@ -279,8 +337,8 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> @parametrize async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.session.with_streaming_response.delete( - agent_id="agent_id", session_id="session_id", + agent_id="agent_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -289,3 +347,17 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie assert session is None assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.session.with_raw_response.delete( + session_id="session_id", + agent_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.session.with_raw_response.delete( + session_id="", + agent_id="agent_id", + ) diff --git a/tests/api_resources/agents/test_steps.py b/tests/api_resources/agents/test_steps.py index 31b6cf48..8315f896 100644 --- a/tests/api_resources/agents/test_steps.py +++ b/tests/api_resources/agents/test_steps.py @@ -20,9 +20,9 @@ class TestSteps: @parametrize def test_method_retrieve(self, client: LlamaStackClient) -> None: step = client.agents.steps.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) assert_matches_type(StepRetrieveResponse, step, path=["response"]) @@ -30,9 +30,9 @@ def test_method_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None: step = client.agents.steps.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", 
x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", @@ -42,9 +42,9 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None @parametrize def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: response = client.agents.steps.with_raw_response.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) @@ -56,9 +56,9 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: with client.agents.steps.with_streaming_response.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) as response: assert not response.is_closed @@ -69,6 +69,40 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="", + session_id="session_id", + turn_id="turn_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="agent_id", + session_id="", + turn_id="turn_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `turn_id` but received ''"): + client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="agent_id", + session_id="session_id", + turn_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): + client.agents.steps.with_raw_response.retrieve( + step_id="", + agent_id="agent_id", + session_id="session_id", + turn_id="turn_id", + ) + class TestAsyncSteps: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -76,9 +110,9 @@ class TestAsyncSteps: @parametrize async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: step = await async_client.agents.steps.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) assert_matches_type(StepRetrieveResponse, step, path=["response"]) @@ -86,9 +120,9 @@ async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> Non @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: step = await async_client.agents.steps.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", @@ -98,9 +132,9 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta @parametrize async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.steps.with_raw_response.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) @@ -112,9 +146,9 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) @parametrize async def 
test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.steps.with_streaming_response.retrieve( + step_id="step_id", agent_id="agent_id", session_id="session_id", - step_id="step_id", turn_id="turn_id", ) as response: assert not response.is_closed @@ -124,3 +158,37 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl assert_matches_type(StepRetrieveResponse, step, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="", + session_id="session_id", + turn_id="turn_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="agent_id", + session_id="", + turn_id="turn_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `turn_id` but received ''"): + await async_client.agents.steps.with_raw_response.retrieve( + step_id="step_id", + agent_id="agent_id", + session_id="session_id", + turn_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): + await async_client.agents.steps.with_raw_response.retrieve( + step_id="", + agent_id="agent_id", + session_id="session_id", + turn_id="turn_id", + ) diff --git a/tests/api_resources/agents/test_turn.py b/tests/api_resources/agents/test_turn.py index 4a3d8304..fcfe6a4a 100644 --- a/tests/api_resources/agents/test_turn.py +++ b/tests/api_resources/agents/test_turn.py @@ -23,6 +23,7 @@ class TestTurn: @parametrize def test_method_create_overload_1(self, client: LlamaStackClient) -> None: turn = client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -30,7 +31,6 @@ def test_method_create_overload_1(self, client: LlamaStackClient) -> None: "role": "user", } ], - session_id="session_id", ) assert_matches_type(TurnCreateResponse, turn, path=["response"]) @@ -40,6 +40,7 @@ def test_method_create_overload_1(self, client: LlamaStackClient) -> None: @parametrize def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None: turn = client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -48,7 +49,6 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient "context": "string", } ], - session_id="session_id", documents=[ { "content": "string", @@ -68,6 +68,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient @parametrize def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None: response = client.agents.turn.with_raw_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -75,7 +76,6 @@ def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None: "role": "user", } ], - session_id="session_id", ) assert response.is_closed is True @@ -89,6 +89,7 @@ def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None: with client.agents.turn.with_streaming_response.create( + session_id="session_id", 
agent_id="agent_id", messages=[ { @@ -96,7 +97,6 @@ def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> "role": "user", } ], - session_id="session_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -106,12 +106,42 @@ def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip( + reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" + ) + @parametrize + def test_path_params_create_overload_1(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.turn.with_raw_response.create( + session_id="session_id", + agent_id="", + messages=[ + { + "content": "string", + "role": "user", + } + ], + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.turn.with_raw_response.create( + session_id="", + agent_id="agent_id", + messages=[ + { + "content": "string", + "role": "user", + } + ], + ) + @pytest.mark.skip( reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" ) @parametrize def test_method_create_overload_2(self, client: LlamaStackClient) -> None: turn_stream = client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -119,7 +149,6 @@ def test_method_create_overload_2(self, client: LlamaStackClient) -> None: "role": "user", } ], - session_id="session_id", stream=True, ) turn_stream.response.close() @@ -130,6 +159,7 @@ def test_method_create_overload_2(self, client: LlamaStackClient) -> None: @parametrize def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None: turn_stream = client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -138,7 +168,6 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient "context": "string", } ], - session_id="session_id", stream=True, documents=[ { @@ -158,6 +187,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient @parametrize def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None: response = client.agents.turn.with_raw_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -165,7 +195,6 @@ def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None: "role": "user", } ], - session_id="session_id", stream=True, ) @@ -179,6 +208,7 @@ def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None: with client.agents.turn.with_streaming_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -186,7 +216,6 @@ def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> "role": "user", } ], - session_id="session_id", stream=True, ) as response: assert not response.is_closed @@ -197,21 +226,52 @@ def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip( + reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" + ) + @parametrize + def 
test_path_params_create_overload_2(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.turn.with_raw_response.create( + session_id="session_id", + agent_id="", + messages=[ + { + "content": "string", + "role": "user", + } + ], + stream=True, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.turn.with_raw_response.create( + session_id="", + agent_id="agent_id", + messages=[ + { + "content": "string", + "role": "user", + } + ], + stream=True, + ) + @parametrize def test_method_retrieve(self, client: LlamaStackClient) -> None: turn = client.agents.turn.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) assert_matches_type(Turn, turn, path=["response"]) @parametrize def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None: turn = client.agents.turn.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -220,9 +280,9 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None @parametrize def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: response = client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) assert response.is_closed is True @@ -233,9 +293,9 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: with client.agents.turn.with_streaming_response.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -245,6 +305,29 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", + agent_id="", + session_id="session_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", + agent_id="agent_id", + session_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `turn_id` but received ''"): + client.agents.turn.with_raw_response.retrieve( + turn_id="", + agent_id="agent_id", + session_id="session_id", + ) + class TestAsyncTurn: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -255,6 +338,7 @@ class TestAsyncTurn: @parametrize async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None: turn = await async_client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -262,7 +346,6 @@ async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClien "role": "user", } ], - session_id="session_id", ) assert_matches_type(TurnCreateResponse, turn, path=["response"]) @@ -272,6 +355,7 @@ 
async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClien @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None: turn = await async_client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -280,7 +364,6 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "context": "string", } ], - session_id="session_id", documents=[ { "content": "string", @@ -300,6 +383,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.turn.with_raw_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -307,7 +391,6 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStac "role": "user", } ], - session_id="session_id", ) assert response.is_closed is True @@ -321,6 +404,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStac @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.turn.with_streaming_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -328,7 +412,6 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncLla "role": "user", } ], - session_id="session_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -338,12 +421,42 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncLla assert cast(Any, response.is_closed) is True + @pytest.mark.skip( + reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" + ) + @parametrize + async def test_path_params_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.turn.with_raw_response.create( + session_id="session_id", + agent_id="", + messages=[ + { + "content": "string", + "role": "user", + } + ], + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.turn.with_raw_response.create( + session_id="", + agent_id="agent_id", + messages=[ + { + "content": "string", + "role": "user", + } + ], + ) + @pytest.mark.skip( reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" ) @parametrize async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None: turn_stream = await async_client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -351,7 +464,6 @@ async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClien "role": "user", } ], - session_id="session_id", stream=True, ) await turn_stream.response.aclose() @@ -362,6 +474,7 @@ async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClien @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None: turn_stream = await async_client.agents.turn.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -370,7 +483,6 @@ async def 
test_method_create_with_all_params_overload_2(self, async_client: Asyn "context": "string", } ], - session_id="session_id", stream=True, documents=[ { @@ -390,6 +502,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.turn.with_raw_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -397,7 +510,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStac "role": "user", } ], - session_id="session_id", stream=True, ) @@ -411,6 +523,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStac @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.turn.with_streaming_response.create( + session_id="session_id", agent_id="agent_id", messages=[ { @@ -418,7 +531,6 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncLla "role": "user", } ], - session_id="session_id", stream=True, ) as response: assert not response.is_closed @@ -429,21 +541,52 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncLla assert cast(Any, response.is_closed) is True + @pytest.mark.skip( + reason="currently no good way to test endpoints with content type text/event-stream, Prism mock server will fail" + ) + @parametrize + async def test_path_params_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.turn.with_raw_response.create( + session_id="session_id", + agent_id="", + messages=[ + { + "content": "string", + "role": "user", + } + ], + stream=True, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.turn.with_raw_response.create( + session_id="", + agent_id="agent_id", + messages=[ + { + "content": "string", + "role": "user", + } + ], + stream=True, + ) + @parametrize async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None: turn = await async_client.agents.turn.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) assert_matches_type(Turn, turn, path=["response"]) @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: turn = await async_client.agents.turn.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -452,9 +595,9 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta @parametrize async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) assert response.is_closed is True @@ -465,9 +608,9 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.agents.turn.with_streaming_response.retrieve( + turn_id="turn_id", 
agent_id="agent_id", session_id="session_id", - turn_id="turn_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -476,3 +619,26 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl assert_matches_type(Turn, turn, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", + agent_id="", + session_id="session_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"): + await async_client.agents.turn.with_raw_response.retrieve( + turn_id="turn_id", + agent_id="agent_id", + session_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `turn_id` but received ''"): + await async_client.agents.turn.with_raw_response.retrieve( + turn_id="", + agent_id="agent_id", + session_id="session_id", + ) diff --git a/tests/api_resources/eval/test_jobs.py b/tests/api_resources/eval/test_jobs.py index 270da0c9..3b2faa2d 100644 --- a/tests/api_resources/eval/test_jobs.py +++ b/tests/api_resources/eval/test_jobs.py @@ -62,6 +62,14 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + client.eval.jobs.with_raw_response.retrieve( + job_id="", + task_id="task_id", + ) + @parametrize def test_method_cancel(self, client: LlamaStackClient) -> None: job = client.eval.jobs.cancel( @@ -150,6 +158,14 @@ def test_streaming_response_status(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_status(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + client.eval.jobs.with_raw_response.status( + job_id="", + task_id="task_id", + ) + class TestAsyncJobs: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -198,6 +214,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl assert cast(Any, response.is_closed) is True + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + await async_client.eval.jobs.with_raw_response.retrieve( + job_id="", + task_id="task_id", + ) + @parametrize async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None: job = await async_client.eval.jobs.cancel( @@ -285,3 +309,11 @@ async def test_streaming_response_status(self, async_client: AsyncLlamaStackClie assert_matches_type(Optional[JobStatusResponse], job, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_status(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + await async_client.eval.jobs.with_raw_response.status( + job_id="", + 
task_id="task_id", + ) diff --git a/tests/api_resources/post_training/test_job.py b/tests/api_resources/post_training/test_job.py index 6bb92e39..72f0af05 100644 --- a/tests/api_resources/post_training/test_job.py +++ b/tests/api_resources/post_training/test_job.py @@ -9,8 +9,8 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import PostTrainingJob from llama_stack_client.types.post_training import ( + JobListResponse, JobStatusResponse, JobArtifactsResponse, ) @@ -21,28 +21,19 @@ class TestJob: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_list(self, client: LlamaStackClient) -> None: job = client.post_training.job.list() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: job = client.post_training.job.list( x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: response = client.post_training.job.with_raw_response.list() @@ -50,11 +41,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: with client.post_training.job.with_streaming_response.list() as response: @@ -62,7 +50,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) assert cast(Any, response.is_closed) is True @@ -190,28 +178,19 @@ def test_streaming_response_status(self, client: LlamaStackClient) -> None: class TestAsyncJob: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: job = await async_client.post_training.job.list() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - 
reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: job = await async_client.post_training.job.list( x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.post_training.job.with_raw_response.list() @@ -219,11 +198,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.post_training.job.with_streaming_response.list() as response: @@ -231,7 +207,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() - assert_matches_type(PostTrainingJob, job, path=["response"]) + assert_matches_type(JobListResponse, job, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 61ed3109..54610c4a 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -56,12 +56,9 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None: "input_shields": ["string"], "output_shields": ["string"], "sampling_params": { - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, "tool_choice": "auto", "tool_prompt_format": "json", @@ -146,6 +143,13 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_delete(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + client.agents.with_raw_response.delete( + agent_id="", + ) + class TestAsyncAgents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -189,12 +193,9 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack "input_shields": ["string"], "output_shields": ["string"], "sampling_params": { - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, "tool_choice": "auto", "tool_prompt_format": "json", @@ -278,3 +279,10 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie assert agent is None assert cast(Any, response.is_closed) 
is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_id` but received ''"): + await async_client.agents.with_raw_response.delete( + agent_id="", + ) diff --git a/tests/api_resources/test_batch_inference.py b/tests/api_resources/test_batch_inference.py index 1cda11fc..1bb67ca9 100644 --- a/tests/api_resources/test_batch_inference.py +++ b/tests/api_resources/test_batch_inference.py @@ -50,12 +50,9 @@ def test_method_chat_completion_with_all_params(self, client: LlamaStackClient) model="model", logprobs={"top_k": 0}, sampling_params={ - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, tool_choice="auto", tool_prompt_format="json", @@ -133,12 +130,9 @@ def test_method_completion_with_all_params(self, client: LlamaStackClient) -> No model="model", logprobs={"top_k": 0}, sampling_params={ - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", @@ -205,12 +199,9 @@ async def test_method_chat_completion_with_all_params(self, async_client: AsyncL model="model", logprobs={"top_k": 0}, sampling_params={ - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, tool_choice="auto", tool_prompt_format="json", @@ -288,12 +279,9 @@ async def test_method_completion_with_all_params(self, async_client: AsyncLlamaS model="model", logprobs={"top_k": 0}, sampling_params={ - "strategy": "greedy", + "strategy": {"type": "greedy"}, "max_tokens": 0, "repetition_penalty": 0, - "temperature": 0, - "top_k": 0, - "top_p": 0, }, x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py index e716f34b..98c903ad 100644 --- a/tests/api_resources/test_datasets.py +++ b/tests/api_resources/test_datasets.py @@ -9,10 +9,7 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import ( - DatasetListResponse, - DatasetRetrieveResponse, -) +from llama_stack_client.types import DatasetListResponse, DatasetRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -60,17 +57,18 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) + @parametrize + def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"): + client.datasets.with_raw_response.retrieve( + dataset_id="", + ) + @parametrize def test_method_list(self, client: LlamaStackClient) -> None: dataset = client.datasets.list() assert_matches_type(DatasetListResponse, dataset, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def 
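The recurring sampling_params edit in the agents and batch-inference tests above reflects a schema change to the shared SamplingParams type: the flat `"strategy": "greedy"` string plus top-level `temperature`/`top_k`/`top_p` fields is replaced by a tagged strategy object. Only the greedy variant appears in these tests; the shape of any other variant below is an assumption for illustration:

    # Old payload shape (removed by this diff):
    old_params = {
        "strategy": "greedy",
        "max_tokens": 0,
        "repetition_penalty": 0,
        "temperature": 0,
        "top_k": 0,
        "top_p": 0,
    }

    # New payload shape (as exercised by the updated tests):
    new_params = {
        "strategy": {"type": "greedy"},
        "max_tokens": 0,
        "repetition_penalty": 0,
    }

    # Presumably the sampling knobs now live on the non-greedy strategy
    # variants themselves; this variant is hypothetical, not shown in the diff:
    assumed_top_p_params = {
        "strategy": {"type": "top_p", "temperature": 0.7, "top_p": 0.9},
        "max_tokens": 128,
    }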
diff --git a/tests/api_resources/test_datasets.py b/tests/api_resources/test_datasets.py
index e716f34b..98c903ad 100644
--- a/tests/api_resources/test_datasets.py
+++ b/tests/api_resources/test_datasets.py
@@ -9,10 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
-    DatasetListResponse,
-    DatasetRetrieveResponse,
-)
+from llama_stack_client.types import DatasetListResponse, DatasetRetrieveResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -60,17 +57,18 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+            client.datasets.with_raw_response.retrieve(
+                dataset_id="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         dataset = client.datasets.list()
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         dataset = client.datasets.list(
@@ -79,9 +77,6 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         )
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.datasets.with_raw_response.list()
@@ -91,9 +86,6 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         dataset = response.parse()
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.datasets.with_streaming_response.list() as response:
@@ -196,6 +188,13 @@ def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    def test_path_params_unregister(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+            client.datasets.with_raw_response.unregister(
+                dataset_id="",
+            )
+

 class TestAsyncDatasets:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -240,17 +239,18 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+            await async_client.datasets.with_raw_response.retrieve(
+                dataset_id="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         dataset = await async_client.datasets.list()
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         dataset = await async_client.datasets.list(
@@ -259,9 +259,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
         )
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.datasets.with_raw_response.list()
@@ -271,9 +268,6 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         dataset = await response.parse()
         assert_matches_type(DatasetListResponse, dataset, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.datasets.with_streaming_response.list() as response:
@@ -375,3 +369,10 @@ async def test_streaming_response_unregister(self, async_client: AsyncLlamaStack
         assert dataset is None

         assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
+            await async_client.datasets.with_raw_response.unregister(
+                dataset_id="",
+            )
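With the Prism JSONL skip markers removed above, the dataset list tests run against the mock server again and assert a dedicated `DatasetListResponse` model rather than the bare item type, which suggests these endpoints no longer respond with application/jsonl. Usage stays a one-liner (a sketch, assuming a configured `client` as in the earlier example):

    datasets = client.datasets.list()  # now typed as DatasetListResponse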
diff --git a/tests/api_resources/test_eval.py b/tests/api_resources/test_eval.py
index 705f498b..35cecf84 100644
--- a/tests/api_resources/test_eval.py
+++ b/tests/api_resources/test_eval.py
@@ -25,7 +25,7 @@ def test_method_evaluate_rows(self, client: LlamaStackClient) -> None:
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -43,12 +43,9 @@ def test_method_evaluate_rows_with_all_params(self, client: LlamaStackClient) ->
                 "eval_candidate": {
                     "model": "model",
                     "sampling_params": {
-                        "strategy": "greedy",
+                        "strategy": {"type": "greedy"},
                         "max_tokens": 0,
                         "repetition_penalty": 0,
-                        "temperature": 0,
-                        "top_k": 0,
-                        "top_p": 0,
                     },
                     "type": "model",
                     "system_message": {
@@ -73,7 +70,7 @@ def test_raw_response_evaluate_rows(self, client: LlamaStackClient) -> None:
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -94,7 +91,7 @@ def test_streaming_response_evaluate_rows(self, client: LlamaStackClient) -> Non
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -115,7 +112,7 @@ def test_method_run_eval(self, client: LlamaStackClient) -> None:
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -131,12 +128,9 @@ def test_method_run_eval_with_all_params(self, client: LlamaStackClient) -> None
                 "eval_candidate": {
                     "model": "model",
                     "sampling_params": {
-                        "strategy": "greedy",
+                        "strategy": {"type": "greedy"},
                         "max_tokens": 0,
                         "repetition_penalty": 0,
-                        "temperature": 0,
-                        "top_k": 0,
-                        "top_p": 0,
                     },
                     "type": "model",
                     "system_message": {
@@ -159,7 +153,7 @@ def test_raw_response_run_eval(self, client: LlamaStackClient) -> None:
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -178,7 +172,7 @@ def test_streaming_response_run_eval(self, client: LlamaStackClient) -> None:
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -205,7 +199,7 @@ async def test_method_evaluate_rows(self, async_client: AsyncLlamaStackClient) -
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -223,12 +217,9 @@ async def test_method_evaluate_rows_with_all_params(self, async_client: AsyncLla
                 "eval_candidate": {
                     "model": "model",
                     "sampling_params": {
-                        "strategy": "greedy",
+                        "strategy": {"type": "greedy"},
                         "max_tokens": 0,
                         "repetition_penalty": 0,
-                        "temperature": 0,
-                        "top_k": 0,
-                        "top_p": 0,
                     },
                     "type": "model",
                     "system_message": {
@@ -253,7 +244,7 @@ async def test_raw_response_evaluate_rows(self, async_client: AsyncLlamaStackCli
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -274,7 +265,7 @@ async def test_streaming_response_evaluate_rows(self, async_client: AsyncLlamaSt
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -295,7 +286,7 @@ async def test_method_run_eval(self, async_client: AsyncLlamaStackClient) -> Non
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -311,12 +302,9 @@ async def test_method_run_eval_with_all_params(self, async_client: AsyncLlamaSta
                 "eval_candidate": {
                     "model": "model",
                     "sampling_params": {
-                        "strategy": "greedy",
+                        "strategy": {"type": "greedy"},
                         "max_tokens": 0,
                         "repetition_penalty": 0,
-                        "temperature": 0,
-                        "top_k": 0,
-                        "top_p": 0,
                     },
                     "type": "model",
                     "system_message": {
@@ -339,7 +327,7 @@ async def test_raw_response_run_eval(self, async_client: AsyncLlamaStackClient)
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
@@ -358,7 +346,7 @@ async def test_streaming_response_run_eval(self, async_client: AsyncLlamaStackCl
             task_config={
                 "eval_candidate": {
                     "model": "model",
-                    "sampling_params": {"strategy": "greedy"},
+                    "sampling_params": {"strategy": {"type": "greedy"}},
                     "type": "model",
                 },
                 "type": "benchmark",
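The eval hunks above make the same strategy change inside the nested `task_config.eval_candidate` payload; nothing else about the config moves. The benchmark candidate the tests now send looks like this (values are the tests' placeholders; the surrounding `run_eval`/`evaluate_rows` calls are unchanged context, so they are not shown):

    task_config = {
        "eval_candidate": {
            "model": "model",
            "sampling_params": {"strategy": {"type": "greedy"}},
            "type": "model",
        },
        "type": "benchmark",
    }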
diff --git a/tests/api_resources/test_eval_tasks.py b/tests/api_resources/test_eval_tasks.py
index f9209844..ac2d8eb8 100644
--- a/tests/api_resources/test_eval_tasks.py
+++ b/tests/api_resources/test_eval_tasks.py
@@ -9,7 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import EvalTask
+from llama_stack_client.types import EvalTask, EvalTaskListResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -20,14 +20,14 @@ class TestEvalTasks:
     @parametrize
     def test_method_retrieve(self, client: LlamaStackClient) -> None:
         eval_task = client.eval_tasks.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         )
         assert_matches_type(Optional[EvalTask], eval_task, path=["response"])

     @parametrize
     def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None:
         eval_task = client.eval_tasks.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -36,7 +36,7 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None
     @parametrize
     def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
         response = client.eval_tasks.with_raw_response.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         )

         assert response.is_closed is True
@@ -47,7 +47,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
         with client.eval_tasks.with_streaming_response.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -57,28 +57,26 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_task_id` but received ''"):
+            client.eval_tasks.with_raw_response.retrieve(
+                eval_task_id="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         eval_task = client.eval_tasks.list()
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         eval_task = client.eval_tasks.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.eval_tasks.with_raw_response.list()
@@ -86,11 +84,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         eval_task = response.parse()
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.eval_tasks.with_streaming_response.list() as response:
@@ -98,7 +93,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             eval_task = response.parse()
-            assert_matches_type(EvalTask, eval_task, path=["response"])
+            assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -160,14 +155,14 @@ class TestAsyncEvalTasks:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         eval_task = await async_client.eval_tasks.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         )
         assert_matches_type(Optional[EvalTask], eval_task, path=["response"])

     @parametrize
     async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         eval_task = await async_client.eval_tasks.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -176,7 +171,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.eval_tasks.with_raw_response.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         )

         assert response.is_closed is True
@@ -187,7 +182,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.eval_tasks.with_streaming_response.retrieve(
-            name="name",
+            eval_task_id="eval_task_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -197,28 +192,26 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_task_id` but received ''"):
+            await async_client.eval_tasks.with_raw_response.retrieve(
+                eval_task_id="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         eval_task = await async_client.eval_tasks.list()
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         eval_task = await async_client.eval_tasks.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.eval_tasks.with_raw_response.list()
@@ -226,11 +219,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         eval_task = await response.parse()
-        assert_matches_type(EvalTask, eval_task, path=["response"])
+        assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.eval_tasks.with_streaming_response.list() as response:
@@ -238,7 +228,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             eval_task = await response.parse()
-            assert_matches_type(EvalTask, eval_task, path=["response"])
+            assert_matches_type(EvalTaskListResponse, eval_task, path=["response"])

         assert cast(Any, response.is_closed) is True

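Two call-site changes in the eval_tasks diff above: retrieval is re-keyed from `name` to `eval_task_id`, and `list()` gains its own `EvalTaskListResponse` model. Migration is a rename at the call site (a sketch, assuming a configured `client`):

    # before: client.eval_tasks.retrieve(name="name")
    eval_task = client.eval_tasks.retrieve(eval_task_id="eval_task_id")
    eval_tasks = client.eval_tasks.list()  # -> EvalTaskListResponse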
diff --git a/tests/api_resources/test_inference.py b/tests/api_resources/test_inference.py
index 80fee008..f992ee2d 100644
--- a/tests/api_resources/test_inference.py
+++ b/tests/api_resources/test_inference.py
@@ -57,12 +57,9 @@ def test_method_chat_completion_with_all_params_overload_1(self, client: LlamaSt
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             stream=False,
             tool_choice="auto",
@@ -166,12 +163,9 @@ def test_method_chat_completion_with_all_params_overload_2(self, client: LlamaSt
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             tool_choice="auto",
             tool_prompt_format="json",
@@ -262,12 +256,9 @@ def test_method_completion_with_all_params_overload_1(self, client: LlamaStackCl
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             stream=False,
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
@@ -334,12 +325,9 @@ def test_method_completion_with_all_params_overload_2(self, client: LlamaStackCl
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
@@ -463,12 +451,9 @@ async def test_method_chat_completion_with_all_params_overload_1(self, async_cli
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             stream=False,
             tool_choice="auto",
@@ -572,12 +557,9 @@ async def test_method_chat_completion_with_all_params_overload_2(self, async_cli
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             tool_choice="auto",
             tool_prompt_format="json",
@@ -668,12 +650,9 @@ async def test_method_completion_with_all_params_overload_1(self, async_client:
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             stream=False,
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
@@ -740,12 +719,9 @@ async def test_method_completion_with_all_params_overload_2(self, async_client:
                 "type": "json_schema",
             },
             sampling_params={
-                "strategy": "greedy",
+                "strategy": {"type": "greedy"},
                 "max_tokens": 0,
                 "repetition_penalty": 0,
-                "temperature": 0,
-                "top_k": 0,
-                "top_p": 0,
             },
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
diff --git a/tests/api_resources/test_memory_banks.py b/tests/api_resources/test_memory_banks.py
index 697c4a16..764f59cc 100644
--- a/tests/api_resources/test_memory_banks.py
+++ b/tests/api_resources/test_memory_banks.py
@@ -61,17 +61,18 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"):
+            client.memory_banks.with_raw_response.retrieve(
+                memory_bank_id="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         memory_bank = client.memory_banks.list()
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         memory_bank = client.memory_banks.list(
@@ -80,9 +81,6 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         )
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.memory_banks.with_raw_response.list()
@@ -92,9 +90,6 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         memory_bank = response.parse()
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.memory_banks.with_streaming_response.list() as response:
@@ -209,6 +204,13 @@ def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    def test_path_params_unregister(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"):
+            client.memory_banks.with_raw_response.unregister(
+                memory_bank_id="",
+            )
+

 class TestAsyncMemoryBanks:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -253,17 +255,18 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"):
+            await async_client.memory_banks.with_raw_response.retrieve(
+                memory_bank_id="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         memory_bank = await async_client.memory_banks.list()
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         memory_bank = await async_client.memory_banks.list(
@@ -272,9 +275,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
         )
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.memory_banks.with_raw_response.list()
@@ -284,9 +284,6 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         memory_bank = await response.parse()
         assert_matches_type(MemoryBankListResponse, memory_bank, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.memory_banks.with_streaming_response.list() as response:
@@ -400,3 +397,10 @@ async def test_streaming_response_unregister(self, async_client: AsyncLlamaStack
         assert memory_bank is None

         assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `memory_bank_id` but received ''"):
+            await async_client.memory_banks.with_raw_response.unregister(
+                memory_bank_id="",
+            )
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index fac90442..b394ebe9 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -9,7 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Model
+from llama_stack_client.types import Model, ModelListResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -20,14 +20,14 @@ class TestModels:
     @parametrize
     def test_method_retrieve(self, client: LlamaStackClient) -> None:
         model = client.models.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         )
         assert_matches_type(Optional[Model], model, path=["response"])

     @parametrize
     def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None:
         model = client.models.retrieve(
-            identifier="identifier",
+            model_id="model_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -36,7 +36,7 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None
     @parametrize
     def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
         response = client.models.with_raw_response.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         )

         assert response.is_closed is True
@@ -47,7 +47,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
         with client.models.with_streaming_response.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -57,28 +57,26 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
+            client.models.with_raw_response.retrieve(
+                model_id="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         model = client.models.list()
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         model = client.models.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.models.with_raw_response.list()
@@ -86,11 +84,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         model = response.parse()
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.models.with_streaming_response.list() as response:
@@ -98,7 +93,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             model = response.parse()
-            assert_matches_type(Model, model, path=["response"])
+            assert_matches_type(ModelListResponse, model, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -186,6 +181,13 @@ def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    def test_path_params_unregister(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
+            client.models.with_raw_response.unregister(
+                model_id="",
+            )
+

 class TestAsyncModels:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -193,14 +195,14 @@ class TestAsyncModels:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         model = await async_client.models.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         )
         assert_matches_type(Optional[Model], model, path=["response"])

     @parametrize
     async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         model = await async_client.models.retrieve(
-            identifier="identifier",
+            model_id="model_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -209,7 +211,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.models.with_raw_response.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         )

         assert response.is_closed is True
@@ -220,7 +222,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.models.with_streaming_response.retrieve(
-            identifier="identifier",
+            model_id="model_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -230,28 +232,26 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
+            await async_client.models.with_raw_response.retrieve(
+                model_id="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         model = await async_client.models.list()
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         model = await async_client.models.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.models.with_raw_response.list()
@@ -259,11 +259,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         model = await response.parse()
-        assert_matches_type(Model, model, path=["response"])
+        assert_matches_type(ModelListResponse, model, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.models.with_streaming_response.list() as response:
@@ -271,7 +268,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             model = await response.parse()
-            assert_matches_type(Model, model, path=["response"])
+            assert_matches_type(ModelListResponse, model, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -358,3 +355,10 @@ async def test_streaming_response_unregister(self, async_client: AsyncLlamaStack
         assert model is None

         assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
+            await async_client.models.with_raw_response.unregister(
+                model_id="",
+            )
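The models diff above follows the same pattern seen elsewhere in this patch: `identifier` becomes `model_id`, `list()` returns `ModelListResponse`, and empty IDs are rejected locally on both `retrieve` and `unregister`. A sketch of the updated call sites (client construction assumed, as in the earlier examples):

    import pytest

    model = client.models.retrieve(model_id="model_id")  # was identifier="identifier"
    models = client.models.list()                        # -> ModelListResponse

    with pytest.raises(ValueError, match="Expected a non-empty value for `model_id`"):
        client.models.with_raw_response.unregister(model_id="")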
diff --git a/tests/api_resources/test_scoring_functions.py b/tests/api_resources/test_scoring_functions.py
index fc6095c5..d88af7f6 100644
--- a/tests/api_resources/test_scoring_functions.py
+++ b/tests/api_resources/test_scoring_functions.py
@@ -9,9 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
-    ScoringFn,
-)
+from llama_stack_client.types import ScoringFn, ScoringFunctionListResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -59,28 +57,26 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
+            client.scoring_functions.with_raw_response.retrieve(
+                scoring_fn_id="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         scoring_function = client.scoring_functions.list()
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         scoring_function = client.scoring_functions.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.scoring_functions.with_raw_response.list()
@@ -88,11 +84,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         scoring_function = response.parse()
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.scoring_functions.with_streaming_response.list() as response:
@@ -100,7 +93,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             scoring_function = response.parse()
-            assert_matches_type(ScoringFn, scoring_function, path=["response"])
+            assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -205,28 +198,26 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
+            await async_client.scoring_functions.with_raw_response.retrieve(
+                scoring_fn_id="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         scoring_function = await async_client.scoring_functions.list()
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         scoring_function = await async_client.scoring_functions.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.scoring_functions.with_raw_response.list()
@@ -234,11 +225,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         scoring_function = await response.parse()
-        assert_matches_type(ScoringFn, scoring_function, path=["response"])
+        assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.scoring_functions.with_streaming_response.list() as response:
@@ -246,7 +234,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             scoring_function = await response.parse()
-            assert_matches_type(ScoringFn, scoring_function, path=["response"])
+            assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])

         assert cast(Any, response.is_closed) is True

diff --git a/tests/api_resources/test_shields.py b/tests/api_resources/test_shields.py
index 7ffdc565..aa6249e6 100644
--- a/tests/api_resources/test_shields.py
+++ b/tests/api_resources/test_shields.py
@@ -9,7 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Shield
+from llama_stack_client.types import Shield, ShieldListResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -57,28 +57,26 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
+            client.shields.with_raw_response.retrieve(
+                identifier="",
+            )
+
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         shield = client.shields.list()
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         shield = client.shields.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.shields.with_raw_response.list()
@@ -86,11 +84,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         shield = response.parse()
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.shields.with_streaming_response.list() as response:
@@ -98,7 +93,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             shield = response.parse()
-            assert_matches_type(Shield, shield, path=["response"])
+            assert_matches_type(ShieldListResponse, shield, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -189,28 +184,26 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
+            await async_client.shields.with_raw_response.retrieve(
+                identifier="",
+            )
+
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         shield = await async_client.shields.list()
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         shield = await async_client.shields.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.shields.with_raw_response.list()
@@ -218,11 +211,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         shield = await response.parse()
-        assert_matches_type(Shield, shield, path=["response"])
+        assert_matches_type(ShieldListResponse, shield, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.shields.with_streaming_response.list() as response:
@@ -230,7 +220,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             shield = await response.parse()
-            assert_matches_type(Shield, shield, path=["response"])
+            assert_matches_type(ShieldListResponse, shield, path=["response"])

         assert cast(Any, response.is_closed) is True

diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py
index bc5849ea..8a43c5ce 100644
--- a/tests/api_resources/test_telemetry.py
+++ b/tests/api_resources/test_telemetry.py
@@ -33,8 +33,8 @@ def test_method_get_span_tree(self, client: LlamaStackClient) -> None:
     def test_method_get_span_tree_with_all_params(self, client: LlamaStackClient) -> None:
         telemetry = client.telemetry.get_span_tree(
             span_id="span_id",
-            max_depth=0,
             attributes_to_return=["string"],
+            max_depth=0,
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -361,8 +361,8 @@ async def test_method_get_span_tree(self, async_client: AsyncLlamaStackClient) -
     async def test_method_get_span_tree_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         telemetry = await async_client.telemetry.get_span_tree(
             span_id="span_id",
-            max_depth=0,
             attributes_to_return=["string"],
+            max_depth=0,
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
diff --git a/tests/api_resources/test_tool_runtime.py b/tests/api_resources/test_tool_runtime.py
index 76c23798..26019f95 100644
--- a/tests/api_resources/test_tool_runtime.py
+++ b/tests/api_resources/test_tool_runtime.py
@@ -78,8 +78,8 @@ def test_method_list_tools(self, client: LlamaStackClient) -> None:
     @parametrize
     def test_method_list_tools_with_all_params(self, client: LlamaStackClient) -> None:
         tool_runtime = client.tool_runtime.list_tools(
-            tool_group_id="tool_group_id",
             mcp_endpoint={"uri": "uri"},
+            tool_group_id="tool_group_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -173,8 +173,8 @@ async def test_method_list_tools(self, async_client: AsyncLlamaStackClient) -> N
     @parametrize
     async def test_method_list_tools_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         tool_runtime = await async_client.tool_runtime.list_tools(
-            tool_group_id="tool_group_id",
             mcp_endpoint={"uri": "uri"},
+            tool_group_id="tool_group_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
diff --git a/tests/api_resources/test_toolgroups.py b/tests/api_resources/test_toolgroups.py
index 7fb4344a..2ed0d635 100644
--- a/tests/api_resources/test_toolgroups.py
+++ b/tests/api_resources/test_toolgroups.py
@@ -9,9 +9,7 @@
 from tests.utils import assert_matches_type
 from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
-    ToolGroup,
-)
+from llama_stack_client.types import ToolGroup, ToolgroupListResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -19,28 +17,19 @@
 class TestToolgroups:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list(self, client: LlamaStackClient) -> None:
         toolgroup = client.toolgroups.list()
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
         toolgroup = client.toolgroups.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_raw_response_list(self, client: LlamaStackClient) -> None:
         response = client.toolgroups.with_raw_response.list()
@@ -48,11 +37,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         toolgroup = response.parse()
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     def test_streaming_response_list(self, client: LlamaStackClient) -> None:
         with client.toolgroups.with_streaming_response.list() as response:
@@ -60,7 +46,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             toolgroup = response.parse()
-            assert_matches_type(ToolGroup, toolgroup, path=["response"])
+            assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -104,6 +90,13 @@ def test_streaming_response_get(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    def test_path_params_get(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
+            client.toolgroups.with_raw_response.get(
+                toolgroup_id="",
+            )
+
     @parametrize
     def test_method_register(self, client: LlamaStackClient) -> None:
         toolgroup = client.toolgroups.register(
@@ -153,14 +146,14 @@ def test_streaming_response_register(self, client: LlamaStackClient) -> None:
     @parametrize
     def test_method_unregister(self, client: LlamaStackClient) -> None:
         toolgroup = client.toolgroups.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         )
         assert toolgroup is None

     @parametrize
     def test_method_unregister_with_all_params(self, client: LlamaStackClient) -> None:
         toolgroup = client.toolgroups.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -169,7 +162,7 @@ def test_method_unregister_with_all_params(self, client: LlamaStackClient) -> No
     @parametrize
     def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
         response = client.toolgroups.with_raw_response.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         )

         assert response.is_closed is True
@@ -180,7 +173,7 @@ def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
     @parametrize
     def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
         with client.toolgroups.with_streaming_response.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -190,32 +183,30 @@ def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    def test_path_params_unregister(self, client: LlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
+            client.toolgroups.with_raw_response.unregister(
+                toolgroup_id="",
+            )
+

 class TestAsyncToolgroups:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
         toolgroup = await async_client.toolgroups.list()
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         toolgroup = await async_client.toolgroups.list(
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.toolgroups.with_raw_response.list()
@@ -223,11 +214,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         toolgroup = await response.parse()
-        assert_matches_type(ToolGroup, toolgroup, path=["response"])
+        assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

-    @pytest.mark.skip(
-        reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail"
-    )
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.toolgroups.with_streaming_response.list() as response:
@@ -235,7 +223,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"

             toolgroup = await response.parse()
-            assert_matches_type(ToolGroup, toolgroup, path=["response"])
+            assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])

         assert cast(Any, response.is_closed) is True

@@ -279,6 +267,13 @@ async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient)

         assert cast(Any, response.is_closed) is True

+    @parametrize
+    async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
+            await async_client.toolgroups.with_raw_response.get(
+                toolgroup_id="",
+            )
+
     @parametrize
     async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
         toolgroup = await async_client.toolgroups.register(
@@ -328,14 +323,14 @@ async def test_streaming_response_register(self, async_client: AsyncLlamaStackCl
     @parametrize
     async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
         toolgroup = await async_client.toolgroups.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         )
         assert toolgroup is None

     @parametrize
     async def test_method_unregister_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
         toolgroup = await async_client.toolgroups.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
             x_llama_stack_client_version="X-LlamaStack-Client-Version",
             x_llama_stack_provider_data="X-LlamaStack-Provider-Data",
         )
@@ -344,7 +339,7 @@ async def test_method_unregister_with_all_params(self, async_client: AsyncLlamaS
     @parametrize
     async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
         response = await async_client.toolgroups.with_raw_response.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         )

         assert response.is_closed is True
@@ -355,7 +350,7 @@ async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient
     @parametrize
     async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
         async with async_client.toolgroups.with_streaming_response.unregister(
-            tool_group_id="tool_group_id",
+            toolgroup_id="toolgroup_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -364,3 +359,10 @@ async def test_streaming_response_unregister(self, async_client: AsyncLlamaStack
         assert toolgroup is None

         assert cast(Any,
response.is_closed) is True + + @parametrize + async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"): + await async_client.toolgroups.with_raw_response.unregister( + toolgroup_id="", + ) diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py index 12e2e66a..4cfab520 100644 --- a/tests/api_resources/test_tools.py +++ b/tests/api_resources/test_tools.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient -from llama_stack_client.types import Tool +from llama_stack_client.types import Tool, ToolListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,29 +17,20 @@ class TestTools: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_list(self, client: LlamaStackClient) -> None: tool = client.tools.list() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_list_with_all_params(self, client: LlamaStackClient) -> None: tool = client.tools.list( - tool_group_id="tool_group_id", + toolgroup_id="toolgroup_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_raw_response_list(self, client: LlamaStackClient) -> None: response = client.tools.with_raw_response.list() @@ -47,11 +38,8 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" tool = response.parse() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_streaming_response_list(self, client: LlamaStackClient) -> None: with client.tools.with_streaming_response.list() as response: @@ -59,7 +47,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" tool = response.parse() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) assert cast(Any, response.is_closed) is True @@ -103,33 +91,31 @@ def test_streaming_response_get(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_get(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `tool_name` but received ''"): + client.tools.with_raw_response.get( + tool_name="", + ) + class TestAsyncTools: 
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None: tool = await async_client.tools.list() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: tool = await async_client.tools.list( - tool_group_id="tool_group_id", + toolgroup_id="toolgroup_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.tools.with_raw_response.list() @@ -137,11 +123,8 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" tool = await response.parse() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.tools.with_streaming_response.list() as response: @@ -149,7 +132,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient assert response.http_request.headers.get("X-Stainless-Lang") == "python" tool = await response.parse() - assert_matches_type(Tool, tool, path=["response"]) + assert_matches_type(ToolListResponse, tool, path=["response"]) assert cast(Any, response.is_closed) is True @@ -192,3 +175,10 @@ async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) assert_matches_type(Tool, tool, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `tool_name` but received ''"): + await async_client.tools.with_raw_response.get( + tool_name="", + )
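
Reviewer note: a minimal usage sketch of the parameter renames and new list
response types in this patch. The base URL and toolgroup identifier below are
illustrative placeholders, not values taken from the diff.

    from llama_stack_client import LlamaStackClient

    # Hypothetical server address; any running Llama Stack endpoint works.
    client = LlamaStackClient(base_url="http://localhost:5000")

    # tools.list and toolgroups.unregister now take `toolgroup_id`
    # (previously `tool_group_id`); "my-toolgroup" is a placeholder id.
    tools = client.tools.list(toolgroup_id="my-toolgroup")
    client.toolgroups.unregister(toolgroup_id="my-toolgroup")

    # List endpoints now return dedicated *ListResponse models instead of the
    # single-item types (e.g. ToolListResponse rather than Tool), and empty
    # path parameters are rejected client-side before any request is sent:
    try:
        client.toolgroups.with_raw_response.get(toolgroup_id="")
    except ValueError as err:
        print(err)  # Expected a non-empty value for `toolgroup_id` but received ''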