From e058cbcb3e0e3ac661120fa63e8c0b887515aa9e Mon Sep 17 00:00:00 2001 From: Dinesh Yeduguru Date: Thu, 16 Jan 2025 12:11:31 -0800 Subject: [PATCH] Sync updates from stainless branch: dineshyv/dev --- src/llama_stack_client/_client.py | 321 ++++++++++----- src/llama_stack_client/resources/eval/eval.py | 42 +- src/llama_stack_client/resources/eval/jobs.py | 81 ++-- src/llama_stack_client/resources/providers.py | 4 +- src/llama_stack_client/resources/routes.py | 23 +- src/llama_stack_client/resources/telemetry.py | 385 ++++++++++++++---- src/llama_stack_client/types/__init__.py | 2 + src/llama_stack_client/types/eval/__init__.py | 3 - .../types/eval_evaluate_rows_params.py | 2 - .../types/eval_run_eval_params.py | 2 - .../types/route_list_response.py | 4 +- .../types/telemetry_get_span_response.py | 24 ++ .../types/telemetry_get_span_tree_params.py | 4 +- .../types/telemetry_query_spans_response.py | 8 +- .../types/telemetry_query_traces_response.py | 10 + tests/api_resources/eval/test_jobs.py | 52 +++ tests/api_resources/test_eval.py | 96 ++++- tests/api_resources/test_telemetry.py | 290 ++++++++++--- 18 files changed, 1004 insertions(+), 349 deletions(-) create mode 100644 src/llama_stack_client/types/telemetry_get_span_response.py create mode 100644 src/llama_stack_client/types/telemetry_query_traces_response.py diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py index 429826ad..59d40871 100644 --- a/src/llama_stack_client/_client.py +++ b/src/llama_stack_client/_client.py @@ -3,58 +3,59 @@ import json import os -from typing import Any, Union, Mapping -from typing_extensions import Self, override +from typing import Any, Mapping, Union import httpx +from typing_extensions import Self, override from . import _exceptions +from ._base_client import ( + DEFAULT_MAX_RETRIES, + AsyncAPIClient, + SyncAPIClient, +) +from ._exceptions import APIStatusError from ._qs import Querystring +from ._streaming import AsyncStream as AsyncStream +from ._streaming import Stream as Stream from ._types import ( NOT_GIVEN, - Omit, - Timeout, NotGiven, - Transport, + Omit, ProxiesTypes, RequestOptions, + Timeout, + Transport, ) from ._utils import ( - is_given, get_async_library, + is_given, ) from ._version import __version__ from .resources import ( - tools, + batch_inference, + datasetio, + datasets, + eval_tasks, + inference, + inspect, memory, + memory_banks, models, + providers, routes, safety, - inspect, scoring, + scoring_functions, shields, - datasets, - datasetio, - inference, - providers, + synthetic_data_generation, telemetry, - eval_tasks, - toolgroups, - memory_banks, tool_runtime, - batch_inference, - scoring_functions, - synthetic_data_generation, -) -from ._streaming import Stream as Stream, AsyncStream as AsyncStream -from ._exceptions import APIStatusError -from ._base_client import ( - DEFAULT_MAX_RETRIES, - SyncAPIClient, - AsyncAPIClient, + toolgroups, + tools, ) -from .resources.eval import eval from .resources.agents import agents +from .resources.eval import eval from .resources.post_training import post_training __all__ = [ @@ -125,12 +126,12 @@ def __init__( if base_url is None: base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: - base_url = f"http://any-hosted-llama-stack.com" + base_url = "http://any-hosted-llama-stack.com" + custom_headers = default_headers or {} + custom_headers["X-LlamaStack-Client-Version"] = __version__ if provider_data is not None: - if default_headers is None: - default_headers = {} - 
default_headers["X-LlamaStack-ProviderData"] = json.dumps(provider_data) + custom_headers["X-LlamaStack-Provider-Data"] = json.dumps(provider_data) super().__init__( version=__version__, @@ -138,7 +139,7 @@ def __init__( max_retries=max_retries, timeout=timeout, http_client=http_client, - custom_headers=default_headers, + custom_headers=custom_headers, custom_query=default_query, _strict_response_validation=_strict_response_validation, ) @@ -160,7 +161,9 @@ def __init__( self.routes = routes.RoutesResource(self) self.safety = safety.SafetyResource(self) self.shields = shields.ShieldsResource(self) - self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResource(self) + self.synthetic_data_generation = ( + synthetic_data_generation.SyntheticDataGenerationResource(self) + ) self.telemetry = telemetry.TelemetryResource(self) self.datasetio = datasetio.DatasetioResource(self) self.scoring = scoring.ScoringResource(self) @@ -200,10 +203,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. """ if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -244,10 +251,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -256,13 +267,17 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) @@ -284,7 +299,9 @@ class AsyncLlamaStackClient(AsyncAPIClient): routes: routes.AsyncRoutesResource safety: safety.AsyncSafetyResource shields: shields.AsyncShieldsResource - synthetic_data_generation: synthetic_data_generation.AsyncSyntheticDataGenerationResource + synthetic_data_generation: ( + synthetic_data_generation.AsyncSyntheticDataGenerationResource + ) telemetry: telemetry.AsyncTelemetryResource datasetio: datasetio.AsyncDatasetioResource scoring: scoring.AsyncScoringResource @@ -322,12 +339,12 @@ def __init__( 
if base_url is None: base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL") if base_url is None: - base_url = f"http://any-hosted-llama-stack.com" + base_url = "http://any-hosted-llama-stack.com" + custom_headers = default_headers or {} + custom_headers["X-LlamaStack-Client-Version"] = __version__ if provider_data is not None: - if default_headers is None: - default_headers = {} - default_headers["X-LlamaStack-ProviderData"] = json.dumps(provider_data) + custom_headers["X-LlamaStack-Provider-Data"] = json.dumps(provider_data) super().__init__( version=__version__, @@ -335,7 +352,7 @@ def __init__( max_retries=max_retries, timeout=timeout, http_client=http_client, - custom_headers=default_headers, + custom_headers=custom_headers, custom_query=default_query, _strict_response_validation=_strict_response_validation, ) @@ -357,7 +374,9 @@ def __init__( self.routes = routes.AsyncRoutesResource(self) self.safety = safety.AsyncSafetyResource(self) self.shields = shields.AsyncShieldsResource(self) - self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResource(self) + self.synthetic_data_generation = ( + synthetic_data_generation.AsyncSyntheticDataGenerationResource(self) + ) self.telemetry = telemetry.AsyncTelemetryResource(self) self.datasetio = datasetio.AsyncDatasetioResource(self) self.scoring = scoring.AsyncScoringResource(self) @@ -397,10 +416,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. """ if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -441,10 +464,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -453,138 +480,232 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) class LlamaStackClientWithRawResponse: def __init__(self, client: LlamaStackClient) -> None: - 
self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse(client.toolgroups) + self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse( + client.toolgroups + ) self.tools = tools.ToolsResourceWithRawResponse(client.tools) - self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse( + client.tool_runtime + ) self.agents = agents.AgentsResourceWithRawResponse(client.agents) - self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse(client.batch_inference) + self.batch_inference = batch_inference.BatchInferenceResourceWithRawResponse( + client.batch_inference + ) self.datasets = datasets.DatasetsResourceWithRawResponse(client.datasets) self.eval = eval.EvalResourceWithRawResponse(client.eval) self.inspect = inspect.InspectResourceWithRawResponse(client.inspect) self.inference = inference.InferenceResourceWithRawResponse(client.inference) self.memory = memory.MemoryResourceWithRawResponse(client.memory) - self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse(client.memory_banks) + self.memory_banks = memory_banks.MemoryBanksResourceWithRawResponse( + client.memory_banks + ) self.models = models.ModelsResourceWithRawResponse(client.models) - self.post_training = post_training.PostTrainingResourceWithRawResponse(client.post_training) + self.post_training = post_training.PostTrainingResourceWithRawResponse( + client.post_training + ) self.providers = providers.ProvidersResourceWithRawResponse(client.providers) self.routes = routes.RoutesResourceWithRawResponse(client.routes) self.safety = safety.SafetyResourceWithRawResponse(client.safety) self.shields = shields.ShieldsResourceWithRawResponse(client.shields) - self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse( - client.synthetic_data_generation + self.synthetic_data_generation = ( + synthetic_data_generation.SyntheticDataGenerationResourceWithRawResponse( + client.synthetic_data_generation + ) ) self.telemetry = telemetry.TelemetryResourceWithRawResponse(client.telemetry) self.datasetio = datasetio.DatasetioResourceWithRawResponse(client.datasetio) self.scoring = scoring.ScoringResourceWithRawResponse(client.scoring) - self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithRawResponse(client.scoring_functions) + self.scoring_functions = ( + scoring_functions.ScoringFunctionsResourceWithRawResponse( + client.scoring_functions + ) + ) self.eval_tasks = eval_tasks.EvalTasksResourceWithRawResponse(client.eval_tasks) class AsyncLlamaStackClientWithRawResponse: def __init__(self, client: AsyncLlamaStackClient) -> None: - self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse(client.toolgroups) + self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse( + client.toolgroups + ) self.tools = tools.AsyncToolsResourceWithRawResponse(client.tools) - self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse( + client.tool_runtime + ) self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents) - self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithRawResponse(client.batch_inference) + self.batch_inference = ( + batch_inference.AsyncBatchInferenceResourceWithRawResponse( + client.batch_inference + ) + ) self.datasets = datasets.AsyncDatasetsResourceWithRawResponse(client.datasets) 
self.eval = eval.AsyncEvalResourceWithRawResponse(client.eval) self.inspect = inspect.AsyncInspectResourceWithRawResponse(client.inspect) - self.inference = inference.AsyncInferenceResourceWithRawResponse(client.inference) + self.inference = inference.AsyncInferenceResourceWithRawResponse( + client.inference + ) self.memory = memory.AsyncMemoryResourceWithRawResponse(client.memory) - self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse(client.memory_banks) + self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithRawResponse( + client.memory_banks + ) self.models = models.AsyncModelsResourceWithRawResponse(client.models) - self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse(client.post_training) - self.providers = providers.AsyncProvidersResourceWithRawResponse(client.providers) + self.post_training = post_training.AsyncPostTrainingResourceWithRawResponse( + client.post_training + ) + self.providers = providers.AsyncProvidersResourceWithRawResponse( + client.providers + ) self.routes = routes.AsyncRoutesResourceWithRawResponse(client.routes) self.safety = safety.AsyncSafetyResourceWithRawResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithRawResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithRawResponse( client.synthetic_data_generation ) - self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse(client.telemetry) - self.datasetio = datasetio.AsyncDatasetioResourceWithRawResponse(client.datasetio) + self.telemetry = telemetry.AsyncTelemetryResourceWithRawResponse( + client.telemetry + ) + self.datasetio = datasetio.AsyncDatasetioResourceWithRawResponse( + client.datasetio + ) self.scoring = scoring.AsyncScoringResourceWithRawResponse(client.scoring) - self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithRawResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.AsyncScoringFunctionsResourceWithRawResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse(client.eval_tasks) class LlamaStackClientWithStreamedResponse: def __init__(self, client: LlamaStackClient) -> None: - self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse(client.toolgroups) + self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse( + client.toolgroups + ) self.tools = tools.ToolsResourceWithStreamingResponse(client.tools) - self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse( + client.tool_runtime + ) self.agents = agents.AgentsResourceWithStreamingResponse(client.agents) - self.batch_inference = batch_inference.BatchInferenceResourceWithStreamingResponse(client.batch_inference) + self.batch_inference = ( + batch_inference.BatchInferenceResourceWithStreamingResponse( + client.batch_inference + ) + ) self.datasets = datasets.DatasetsResourceWithStreamingResponse(client.datasets) self.eval = eval.EvalResourceWithStreamingResponse(client.eval) self.inspect = inspect.InspectResourceWithStreamingResponse(client.inspect) - self.inference = inference.InferenceResourceWithStreamingResponse(client.inference) + self.inference = inference.InferenceResourceWithStreamingResponse( + client.inference + ) self.memory = 
memory.MemoryResourceWithStreamingResponse(client.memory) - self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse(client.memory_banks) + self.memory_banks = memory_banks.MemoryBanksResourceWithStreamingResponse( + client.memory_banks + ) self.models = models.ModelsResourceWithStreamingResponse(client.models) - self.post_training = post_training.PostTrainingResourceWithStreamingResponse(client.post_training) - self.providers = providers.ProvidersResourceWithStreamingResponse(client.providers) + self.post_training = post_training.PostTrainingResourceWithStreamingResponse( + client.post_training + ) + self.providers = providers.ProvidersResourceWithStreamingResponse( + client.providers + ) self.routes = routes.RoutesResourceWithStreamingResponse(client.routes) self.safety = safety.SafetyResourceWithStreamingResponse(client.safety) self.shields = shields.ShieldsResourceWithStreamingResponse(client.shields) self.synthetic_data_generation = synthetic_data_generation.SyntheticDataGenerationResourceWithStreamingResponse( client.synthetic_data_generation ) - self.telemetry = telemetry.TelemetryResourceWithStreamingResponse(client.telemetry) - self.datasetio = datasetio.DatasetioResourceWithStreamingResponse(client.datasetio) + self.telemetry = telemetry.TelemetryResourceWithStreamingResponse( + client.telemetry + ) + self.datasetio = datasetio.DatasetioResourceWithStreamingResponse( + client.datasetio + ) self.scoring = scoring.ScoringResourceWithStreamingResponse(client.scoring) - self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithStreamingResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.ScoringFunctionsResourceWithStreamingResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse(client.eval_tasks) class AsyncLlamaStackClientWithStreamedResponse: def __init__(self, client: AsyncLlamaStackClient) -> None: - self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse(client.toolgroups) + self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse( + client.toolgroups + ) self.tools = tools.AsyncToolsResourceWithStreamingResponse(client.tools) - self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse(client.tool_runtime) + self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse( + client.tool_runtime + ) self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents) - self.batch_inference = batch_inference.AsyncBatchInferenceResourceWithStreamingResponse(client.batch_inference) - self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(client.datasets) + self.batch_inference = ( + batch_inference.AsyncBatchInferenceResourceWithStreamingResponse( + client.batch_inference + ) + ) + self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse( + client.datasets + ) self.eval = eval.AsyncEvalResourceWithStreamingResponse(client.eval) self.inspect = inspect.AsyncInspectResourceWithStreamingResponse(client.inspect) - self.inference = inference.AsyncInferenceResourceWithStreamingResponse(client.inference) + self.inference = inference.AsyncInferenceResourceWithStreamingResponse( + client.inference + ) self.memory = memory.AsyncMemoryResourceWithStreamingResponse(client.memory) - self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse(client.memory_banks) + 
self.memory_banks = memory_banks.AsyncMemoryBanksResourceWithStreamingResponse( + client.memory_banks + ) self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) - self.post_training = post_training.AsyncPostTrainingResourceWithStreamingResponse(client.post_training) - self.providers = providers.AsyncProvidersResourceWithStreamingResponse(client.providers) + self.post_training = ( + post_training.AsyncPostTrainingResourceWithStreamingResponse( + client.post_training + ) + ) + self.providers = providers.AsyncProvidersResourceWithStreamingResponse( + client.providers + ) self.routes = routes.AsyncRoutesResourceWithStreamingResponse(client.routes) self.safety = safety.AsyncSafetyResourceWithStreamingResponse(client.safety) self.shields = shields.AsyncShieldsResourceWithStreamingResponse(client.shields) - self.synthetic_data_generation = ( - synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse( - client.synthetic_data_generation - ) + self.synthetic_data_generation = synthetic_data_generation.AsyncSyntheticDataGenerationResourceWithStreamingResponse( + client.synthetic_data_generation + ) + self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse( + client.telemetry + ) + self.datasetio = datasetio.AsyncDatasetioResourceWithStreamingResponse( + client.datasetio ) - self.telemetry = telemetry.AsyncTelemetryResourceWithStreamingResponse(client.telemetry) - self.datasetio = datasetio.AsyncDatasetioResourceWithStreamingResponse(client.datasetio) self.scoring = scoring.AsyncScoringResourceWithStreamingResponse(client.scoring) - self.scoring_functions = scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse( - client.scoring_functions + self.scoring_functions = ( + scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse( + client.scoring_functions + ) + ) + self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse( + client.eval_tasks ) - self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse(client.eval_tasks) Client = LlamaStackClient diff --git a/src/llama_stack_client/resources/eval/eval.py b/src/llama_stack_client/resources/eval/eval.py index 094ad09d..fabf6d4f 100644 --- a/src/llama_stack_client/resources/eval/eval.py +++ b/src/llama_stack_client/resources/eval/eval.py @@ -62,11 +62,11 @@ def with_streaming_response(self) -> EvalResourceWithStreamingResponse: def evaluate_rows( self, + task_id: str, *, input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]], scoring_functions: List[str], task_config: eval_evaluate_rows_params.TaskConfig, - task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
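Note on the hunks around this point: `task_id` moves out of the request body and into the URL path (POST `/v1/eval/tasks/{task_id}/evaluations`), becoming the first positional argument of `evaluate_rows`. A minimal sketch of the new calling convention; the base URL, task id, rows, scoring function, and `task_config` payload are illustrative placeholders (the `task_config` schema is not shown in this patch, so the benchmark-style shape below is an assumption; see `eval_evaluate_rows_params.TaskConfig` for the real one):

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:5000")  # illustrative URL

    task_config = {
        # assumed benchmark-style shape; consult eval_evaluate_rows_params.TaskConfig
        "type": "benchmark",
        "eval_candidate": {
            "type": "model",
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "sampling_params": {"strategy": "greedy"},
        },
    }

    response = client.eval.evaluate_rows(
        "my-eval-task",  # task_id is now a path parameter, not a body field
        input_rows=[{"input_query": "What is 2 + 2?", "expected_answer": "4"}],
        scoring_functions=["basic::equality"],
        task_config=task_config,
    )
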
@@ -86,6 +86,8 @@ def evaluate_rows( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") extra_headers = { **strip_not_given( { @@ -96,13 +98,12 @@ def evaluate_rows( **(extra_headers or {}), } return self._post( - "/v1/eval/evaluate-rows", + f"/v1/eval/tasks/{task_id}/evaluations", body=maybe_transform( { "input_rows": input_rows, "scoring_functions": scoring_functions, "task_config": task_config, - "task_id": task_id, }, eval_evaluate_rows_params.EvalEvaluateRowsParams, ), @@ -114,9 +115,9 @@ def evaluate_rows( def run_eval( self, + task_id: str, *, task_config: eval_run_eval_params.TaskConfig, - task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -136,6 +137,8 @@ def run_eval( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") extra_headers = { **strip_not_given( { @@ -146,14 +149,8 @@ def run_eval( **(extra_headers or {}), } return self._post( - "/v1/eval/run", - body=maybe_transform( - { - "task_config": task_config, - "task_id": task_id, - }, - eval_run_eval_params.EvalRunEvalParams, - ), + f"/v1/eval/tasks/{task_id}/jobs", + body=maybe_transform({"task_config": task_config}, eval_run_eval_params.EvalRunEvalParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -187,11 +184,11 @@ def with_streaming_response(self) -> AsyncEvalResourceWithStreamingResponse: async def evaluate_rows( self, + task_id: str, *, input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]], scoring_functions: List[str], task_config: eval_evaluate_rows_params.TaskConfig, - task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -211,6 +208,8 @@ async def evaluate_rows( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") extra_headers = { **strip_not_given( { @@ -221,13 +220,12 @@ async def evaluate_rows( **(extra_headers or {}), } return await self._post( - "/v1/eval/evaluate-rows", + f"/v1/eval/tasks/{task_id}/evaluations", body=await async_maybe_transform( { "input_rows": input_rows, "scoring_functions": scoring_functions, "task_config": task_config, - "task_id": task_id, }, eval_evaluate_rows_params.EvalEvaluateRowsParams, ), @@ -239,9 +237,9 @@ async def evaluate_rows( async def run_eval( self, + task_id: str, *, task_config: eval_run_eval_params.TaskConfig, - task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
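The same path-parameter treatment applies to `run_eval` (now POST `/v1/eval/tasks/{task_id}/jobs`) and to the job endpoints in `eval/jobs.py` further down, where `cancel` also switches from a POST with a body to a DELETE on `/v1/eval/tasks/{task_id}/jobs/{job_id}`. A sketch of the resulting job lifecycle, reusing the illustrative `client` and `task_config` above and assuming `run_eval` returns a job object exposing `job_id` (its return type is not shown in this patch):

    job = client.eval.run_eval(
        "my-eval-task",  # hypothetical task id, now a path parameter
        task_config=task_config,
    )

    # status, results, and cancellation are all keyed by task_id + job_id
    status = client.eval.jobs.status(job_id=job.job_id, task_id="my-eval-task")
    result = client.eval.jobs.retrieve(job_id=job.job_id, task_id="my-eval-task")
    client.eval.jobs.cancel(job_id=job.job_id, task_id="my-eval-task")  # now a DELETE

The telemetry resource later in this patch adopts the same REST convention: the POST-based query endpoints become GETs, with trace/span ids in the path and filters passed as query parameters unwrapped via `DataWrapper`.
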
@@ -261,6 +259,8 @@ async def run_eval( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") extra_headers = { **strip_not_given( { @@ -271,14 +271,8 @@ async def run_eval( **(extra_headers or {}), } return await self._post( - "/v1/eval/run", - body=await async_maybe_transform( - { - "task_config": task_config, - "task_id": task_id, - }, - eval_run_eval_params.EvalRunEvalParams, - ), + f"/v1/eval/tasks/{task_id}/jobs", + body=await async_maybe_transform({"task_config": task_config}, eval_run_eval_params.EvalRunEvalParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/llama_stack_client/resources/eval/jobs.py b/src/llama_stack_client/resources/eval/jobs.py index 944ca1a9..c2657abf 100644 --- a/src/llama_stack_client/resources/eval/jobs.py +++ b/src/llama_stack_client/resources/eval/jobs.py @@ -7,11 +7,7 @@ import httpx from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import ( - maybe_transform, - strip_not_given, - async_maybe_transform, -) +from ..._utils import strip_not_given from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -20,7 +16,6 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...types.eval import job_cancel_params, job_status_params, job_retrieve_params from ..._base_client import make_request_options from ...types.evaluate_response import EvaluateResponse from ...types.eval.job_status_response import JobStatusResponse @@ -72,6 +67,8 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") if not job_id: raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { @@ -84,21 +81,17 @@ def retrieve( **(extra_headers or {}), } return self._get( - f"/v1/eval/jobs/{job_id}/result", + f"/v1/eval/tasks/{task_id}/jobs/{job_id}/result", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"task_id": task_id}, job_retrieve_params.JobRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvaluateResponse, ) def cancel( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -119,6 +112,10 @@ def cancel( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -129,15 +126,8 @@ def cancel( ), **(extra_headers or {}), } - return self._post( - "/v1/eval/jobs/cancel", - body=maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_cancel_params.JobCancelParams, - ), + return self._delete( + f"/v1/eval/tasks/{task_id}/jobs/{job_id}", options=make_request_options( extra_headers=extra_headers, 
extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -168,6 +158,8 @@ def status( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") if not job_id: raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { @@ -180,13 +172,9 @@ def status( **(extra_headers or {}), } return self._get( - f"/v1/eval/jobs/{job_id}", + f"/v1/eval/tasks/{task_id}/jobs/{job_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"task_id": task_id}, job_status_params.JobStatusParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=JobStatusResponse, ) @@ -236,6 +224,8 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") if not job_id: raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { @@ -248,21 +238,17 @@ async def retrieve( **(extra_headers or {}), } return await self._get( - f"/v1/eval/jobs/{job_id}/result", + f"/v1/eval/tasks/{task_id}/jobs/{job_id}/result", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"task_id": task_id}, job_retrieve_params.JobRetrieveParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvaluateResponse, ) async def cancel( self, - *, job_id: str, + *, task_id: str, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, @@ -283,6 +269,10 @@ async def cancel( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") + if not job_id: + raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = {"Accept": "*/*", **(extra_headers or {})} extra_headers = { **strip_not_given( @@ -293,15 +283,8 @@ async def cancel( ), **(extra_headers or {}), } - return await self._post( - "/v1/eval/jobs/cancel", - body=await async_maybe_transform( - { - "job_id": job_id, - "task_id": task_id, - }, - job_cancel_params.JobCancelParams, - ), + return await self._delete( + f"/v1/eval/tasks/{task_id}/jobs/{job_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -332,6 +315,8 @@ async def status( timeout: Override the client-level default timeout for this request, in seconds """ + if not task_id: + raise ValueError(f"Expected a non-empty value for `task_id` but received {task_id!r}") if not job_id: raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}") extra_headers = { @@ -344,13 +329,9 @@ async def status( **(extra_headers or {}), } return await self._get( - f"/v1/eval/jobs/{job_id}", + f"/v1/eval/tasks/{task_id}/jobs/{job_id}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"task_id": task_id}, 
job_status_params.JobStatusParams), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=JobStatusResponse, ) diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py index 200b75ae..61b3b7c8 100644 --- a/src/llama_stack_client/resources/providers.py +++ b/src/llama_stack_client/resources/providers.py @@ -75,7 +75,7 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/providers/list", + "/v1/inspect/providers", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -139,7 +139,7 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/providers/list", + "/v1/inspect/providers", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/llama_stack_client/resources/routes.py b/src/llama_stack_client/resources/routes.py index c6ff989d..7586fe63 100644 --- a/src/llama_stack_client/resources/routes.py +++ b/src/llama_stack_client/resources/routes.py @@ -2,6 +2,8 @@ from __future__ import annotations +from typing import Type, cast + import httpx from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven @@ -14,6 +16,7 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .._wrappers import DataWrapper from .._base_client import make_request_options from ..types.route_list_response import RouteListResponse @@ -72,11 +75,15 @@ def list( **(extra_headers or {}), } return self._get( - "/v1/routes/list", + "/v1/inspect/routes", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=DataWrapper[RouteListResponse]._unwrapper, ), - cast_to=RouteListResponse, + cast_to=cast(Type[RouteListResponse], DataWrapper[RouteListResponse]), ) @@ -132,11 +139,15 @@ async def list( **(extra_headers or {}), } return await self._get( - "/v1/routes/list", + "/v1/inspect/routes", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=DataWrapper[RouteListResponse]._unwrapper, ), - cast_to=RouteListResponse, + cast_to=cast(Type[RouteListResponse], DataWrapper[RouteListResponse]), ) diff --git a/src/llama_stack_client/resources/telemetry.py b/src/llama_stack_client/resources/telemetry.py index 8125d5c5..6e15678b 100644 --- a/src/llama_stack_client/resources/telemetry.py +++ b/src/llama_stack_client/resources/telemetry.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Iterable +from typing import List, Type, Iterable, cast import httpx @@ -27,9 +27,12 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .._wrappers import DataWrapper from ..types.trace import Trace from .._base_client import make_request_options +from ..types.telemetry_get_span_response import TelemetryGetSpanResponse from ..types.telemetry_query_spans_response import TelemetryQuerySpansResponse +from ..types.telemetry_query_traces_response import TelemetryQueryTracesResponse from ..types.telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse __all__ = ["TelemetryResource", "AsyncTelemetryResource"] @@ -55,10 +58,55 @@ def with_streaming_response(self) -> 
TelemetryResourceWithStreamingResponse: """ return TelemetryResourceWithStreamingResponse(self) - def get_span_tree( + def get_span( self, + span_id: str, *, + trace_id: str, + x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, + x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TelemetryGetSpanResponse: + """ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not trace_id: + raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}") + if not span_id: + raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}") + extra_headers = { + **strip_not_given( + { + "X-LlamaStack-Client-Version": x_llama_stack_client_version, + "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, + } + ), + **(extra_headers or {}), + } + return self._get( + f"/v1/telemetry/traces/{trace_id}/spans/{span_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=TelemetryGetSpanResponse, + ) + + def get_span_tree( + self, span_id: str, + *, attributes_to_return: List[str] | NotGiven = NOT_GIVEN, max_depth: int | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, @@ -80,6 +128,8 @@ def get_span_tree( timeout: Override the client-level default timeout for this request, in seconds """ + if not span_id: + raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}") extra_headers = { **strip_not_given( { @@ -89,20 +139,65 @@ def get_span_tree( ), **(extra_headers or {}), } - return self._post( - "/v1/telemetry/query-span-tree", - body=maybe_transform( + return self._get( + f"/v1/telemetry/spans/{span_id}/tree", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "attributes_to_return": attributes_to_return, + "max_depth": max_depth, + }, + telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, + ), + post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper, + ), + cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]), + ) + + def get_trace( + self, + trace_id: str, + *, + x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, + x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Trace: + """ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not trace_id: + raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}") + extra_headers = { + **strip_not_given( { - "span_id": span_id, - "attributes_to_return": attributes_to_return, - "max_depth": max_depth, - }, - telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, + "X-LlamaStack-Client-Version": x_llama_stack_client_version, + "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, + } ), + **(extra_headers or {}), + } + return self._get( + f"/v1/telemetry/traces/{trace_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=TelemetryGetSpanTreeResponse, + cast_to=Trace, ) def log_event( @@ -140,7 +235,7 @@ def log_event( **(extra_headers or {}), } return self._post( - "/v1/telemetry/log-event", + "/v1/telemetry/events", body=maybe_transform( { "event": event, @@ -179,7 +274,6 @@ def query_spans( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -189,20 +283,24 @@ def query_spans( ), **(extra_headers or {}), } - return self._post( - "/v1/telemetry/query-spans", - body=maybe_transform( - { - "attribute_filters": attribute_filters, - "attributes_to_return": attributes_to_return, - "max_depth": max_depth, - }, - telemetry_query_spans_params.TelemetryQuerySpansParams, - ), + return self._get( + "/v1/telemetry/spans", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "attribute_filters": attribute_filters, + "attributes_to_return": attributes_to_return, + "max_depth": max_depth, + }, + telemetry_query_spans_params.TelemetryQuerySpansParams, + ), + post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper, ), - cast_to=TelemetryQuerySpansResponse, + cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]), ) def query_traces( @@ -220,7 +318,7 @@ def query_traces( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Trace: + ) -> TelemetryQueryTracesResponse: """ Args: extra_headers: Send extra headers @@ -231,7 +329,6 @@ def query_traces( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -241,21 +338,25 @@ def query_traces( ), **(extra_headers or {}), } - return self._post( - "/v1/telemetry/query-traces", - body=maybe_transform( - { - "attribute_filters": attribute_filters, - "limit": limit, - "offset": offset, - "order_by": order_by, - }, - telemetry_query_traces_params.TelemetryQueryTracesParams, - ), + return self._get( + "/v1/telemetry/traces", options=make_request_options( - 
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "attribute_filters": attribute_filters, + "limit": limit, + "offset": offset, + "order_by": order_by, + }, + telemetry_query_traces_params.TelemetryQueryTracesParams, + ), + post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper, ), - cast_to=Trace, + cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]), ) def save_spans_to_dataset( @@ -295,7 +396,7 @@ def save_spans_to_dataset( **(extra_headers or {}), } return self._post( - "/v1/telemetry/save-spans-to-dataset", + "/v1/telemetry/spans/export", body=maybe_transform( { "attribute_filters": attribute_filters, @@ -332,10 +433,55 @@ def with_streaming_response(self) -> AsyncTelemetryResourceWithStreamingResponse """ return AsyncTelemetryResourceWithStreamingResponse(self) - async def get_span_tree( + async def get_span( self, + span_id: str, *, + trace_id: str, + x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, + x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TelemetryGetSpanResponse: + """ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not trace_id: + raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}") + if not span_id: + raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}") + extra_headers = { + **strip_not_given( + { + "X-LlamaStack-Client-Version": x_llama_stack_client_version, + "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, + } + ), + **(extra_headers or {}), + } + return await self._get( + f"/v1/telemetry/traces/{trace_id}/spans/{span_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=TelemetryGetSpanResponse, + ) + + async def get_span_tree( + self, span_id: str, + *, attributes_to_return: List[str] | NotGiven = NOT_GIVEN, max_depth: int | NotGiven = NOT_GIVEN, x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, @@ -357,6 +503,8 @@ async def get_span_tree( timeout: Override the client-level default timeout for this request, in seconds """ + if not span_id: + raise ValueError(f"Expected a non-empty value for `span_id` but received {span_id!r}") extra_headers = { **strip_not_given( { @@ -366,20 +514,65 @@ async def get_span_tree( ), **(extra_headers or {}), } - return await self._post( - "/v1/telemetry/query-span-tree", - body=await async_maybe_transform( + return await self._get( + f"/v1/telemetry/spans/{span_id}/tree", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "attributes_to_return": attributes_to_return, + 
"max_depth": max_depth, + }, + telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, + ), + post_parser=DataWrapper[TelemetryGetSpanTreeResponse]._unwrapper, + ), + cast_to=cast(Type[TelemetryGetSpanTreeResponse], DataWrapper[TelemetryGetSpanTreeResponse]), + ) + + async def get_trace( + self, + trace_id: str, + *, + x_llama_stack_client_version: str | NotGiven = NOT_GIVEN, + x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Trace: + """ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not trace_id: + raise ValueError(f"Expected a non-empty value for `trace_id` but received {trace_id!r}") + extra_headers = { + **strip_not_given( { - "span_id": span_id, - "attributes_to_return": attributes_to_return, - "max_depth": max_depth, - }, - telemetry_get_span_tree_params.TelemetryGetSpanTreeParams, + "X-LlamaStack-Client-Version": x_llama_stack_client_version, + "X-LlamaStack-Provider-Data": x_llama_stack_provider_data, + } ), + **(extra_headers or {}), + } + return await self._get( + f"/v1/telemetry/traces/{trace_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=TelemetryGetSpanTreeResponse, + cast_to=Trace, ) async def log_event( @@ -417,7 +610,7 @@ async def log_event( **(extra_headers or {}), } return await self._post( - "/v1/telemetry/log-event", + "/v1/telemetry/events", body=await async_maybe_transform( { "event": event, @@ -456,7 +649,6 @@ async def query_spans( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -466,20 +658,24 @@ async def query_spans( ), **(extra_headers or {}), } - return await self._post( - "/v1/telemetry/query-spans", - body=await async_maybe_transform( - { - "attribute_filters": attribute_filters, - "attributes_to_return": attributes_to_return, - "max_depth": max_depth, - }, - telemetry_query_spans_params.TelemetryQuerySpansParams, - ), + return await self._get( + "/v1/telemetry/spans", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "attribute_filters": attribute_filters, + "attributes_to_return": attributes_to_return, + "max_depth": max_depth, + }, + telemetry_query_spans_params.TelemetryQuerySpansParams, + ), + post_parser=DataWrapper[TelemetryQuerySpansResponse]._unwrapper, ), - cast_to=TelemetryQuerySpansResponse, + cast_to=cast(Type[TelemetryQuerySpansResponse], DataWrapper[TelemetryQuerySpansResponse]), ) async def query_traces( @@ -497,7 +693,7 @@ async def query_traces( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | 
NotGiven = NOT_GIVEN, - ) -> Trace: + ) -> TelemetryQueryTracesResponse: """ Args: extra_headers: Send extra headers @@ -508,7 +704,6 @@ async def query_traces( timeout: Override the client-level default timeout for this request, in seconds """ - extra_headers = {"Accept": "application/jsonl", **(extra_headers or {})} extra_headers = { **strip_not_given( { @@ -518,21 +713,25 @@ async def query_traces( ), **(extra_headers or {}), } - return await self._post( - "/v1/telemetry/query-traces", - body=await async_maybe_transform( - { - "attribute_filters": attribute_filters, - "limit": limit, - "offset": offset, - "order_by": order_by, - }, - telemetry_query_traces_params.TelemetryQueryTracesParams, - ), + return await self._get( + "/v1/telemetry/traces", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "attribute_filters": attribute_filters, + "limit": limit, + "offset": offset, + "order_by": order_by, + }, + telemetry_query_traces_params.TelemetryQueryTracesParams, + ), + post_parser=DataWrapper[TelemetryQueryTracesResponse]._unwrapper, ), - cast_to=Trace, + cast_to=cast(Type[TelemetryQueryTracesResponse], DataWrapper[TelemetryQueryTracesResponse]), ) async def save_spans_to_dataset( @@ -572,7 +771,7 @@ async def save_spans_to_dataset( **(extra_headers or {}), } return await self._post( - "/v1/telemetry/save-spans-to-dataset", + "/v1/telemetry/spans/export", body=await async_maybe_transform( { "attribute_filters": attribute_filters, @@ -593,9 +792,15 @@ class TelemetryResourceWithRawResponse: def __init__(self, telemetry: TelemetryResource) -> None: self._telemetry = telemetry + self.get_span = to_raw_response_wrapper( + telemetry.get_span, + ) self.get_span_tree = to_raw_response_wrapper( telemetry.get_span_tree, ) + self.get_trace = to_raw_response_wrapper( + telemetry.get_trace, + ) self.log_event = to_raw_response_wrapper( telemetry.log_event, ) @@ -614,9 +819,15 @@ class AsyncTelemetryResourceWithRawResponse: def __init__(self, telemetry: AsyncTelemetryResource) -> None: self._telemetry = telemetry + self.get_span = async_to_raw_response_wrapper( + telemetry.get_span, + ) self.get_span_tree = async_to_raw_response_wrapper( telemetry.get_span_tree, ) + self.get_trace = async_to_raw_response_wrapper( + telemetry.get_trace, + ) self.log_event = async_to_raw_response_wrapper( telemetry.log_event, ) @@ -635,9 +846,15 @@ class TelemetryResourceWithStreamingResponse: def __init__(self, telemetry: TelemetryResource) -> None: self._telemetry = telemetry + self.get_span = to_streamed_response_wrapper( + telemetry.get_span, + ) self.get_span_tree = to_streamed_response_wrapper( telemetry.get_span_tree, ) + self.get_trace = to_streamed_response_wrapper( + telemetry.get_trace, + ) self.log_event = to_streamed_response_wrapper( telemetry.log_event, ) @@ -656,9 +873,15 @@ class AsyncTelemetryResourceWithStreamingResponse: def __init__(self, telemetry: AsyncTelemetryResource) -> None: self._telemetry = telemetry + self.get_span = async_to_streamed_response_wrapper( + telemetry.get_span, + ) self.get_span_tree = async_to_streamed_response_wrapper( telemetry.get_span_tree, ) + self.get_trace = async_to_streamed_response_wrapper( + telemetry.get_trace, + ) self.log_event = async_to_streamed_response_wrapper( telemetry.log_event, ) diff --git a/src/llama_stack_client/types/__init__.py 
b/src/llama_stack_client/types/__init__.py index 705e3b39..a727d8b0 100644 --- a/src/llama_stack_client/types/__init__.py +++ b/src/llama_stack_client/types/__init__.py @@ -79,6 +79,7 @@ from .inference_completion_params import InferenceCompletionParams as InferenceCompletionParams from .inference_embeddings_params import InferenceEmbeddingsParams as InferenceEmbeddingsParams from .memory_bank_register_params import MemoryBankRegisterParams as MemoryBankRegisterParams +from .telemetry_get_span_response import TelemetryGetSpanResponse as TelemetryGetSpanResponse from .datasetio_append_rows_params import DatasetioAppendRowsParams as DatasetioAppendRowsParams from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse from .telemetry_query_spans_params import TelemetryQuerySpansParams as TelemetryQuerySpansParams @@ -90,6 +91,7 @@ from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams +from .telemetry_query_traces_response import TelemetryQueryTracesResponse as TelemetryQueryTracesResponse from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams from .inference_chat_completion_params import InferenceChatCompletionParams as InferenceChatCompletionParams from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams diff --git a/src/llama_stack_client/types/eval/__init__.py b/src/llama_stack_client/types/eval/__init__.py index 6b25d6b8..a0c3f3bc 100644 --- a/src/llama_stack_client/types/eval/__init__.py +++ b/src/llama_stack_client/types/eval/__init__.py @@ -2,7 +2,4 @@ from __future__ import annotations -from .job_cancel_params import JobCancelParams as JobCancelParams -from .job_status_params import JobStatusParams as JobStatusParams -from .job_retrieve_params import JobRetrieveParams as JobRetrieveParams from .job_status_response import JobStatusResponse as JobStatusResponse diff --git a/src/llama_stack_client/types/eval_evaluate_rows_params.py b/src/llama_stack_client/types/eval_evaluate_rows_params.py index 6c7c9e60..51271f8d 100644 --- a/src/llama_stack_client/types/eval_evaluate_rows_params.py +++ b/src/llama_stack_client/types/eval_evaluate_rows_params.py @@ -35,8 +35,6 @@ class EvalEvaluateRowsParams(TypedDict, total=False): task_config: Required[TaskConfig] - task_id: Required[str] - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/eval_run_eval_params.py b/src/llama_stack_client/types/eval_run_eval_params.py index 1992ae41..0865a74a 100644 --- a/src/llama_stack_client/types/eval_run_eval_params.py +++ b/src/llama_stack_client/types/eval_run_eval_params.py @@ -31,8 +31,6 @@ class EvalRunEvalParams(TypedDict, total=False): task_config: Required[TaskConfig] - task_id: Required[str] - x_llama_stack_client_version: Annotated[str, PropertyInfo(alias="X-LlamaStack-Client-Version")] x_llama_stack_provider_data: Annotated[str, PropertyInfo(alias="X-LlamaStack-Provider-Data")] diff --git a/src/llama_stack_client/types/route_list_response.py b/src/llama_stack_client/types/route_list_response.py index e9c96c11..cec8e0e1 100644 --- 
a/src/llama_stack_client/types/route_list_response.py +++ b/src/llama_stack_client/types/route_list_response.py @@ -1,10 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List +from typing import List from typing_extensions import TypeAlias from .route_info import RouteInfo __all__ = ["RouteListResponse"] -RouteListResponse: TypeAlias = Dict[str, List[RouteInfo]] +RouteListResponse: TypeAlias = List[RouteInfo] diff --git a/src/llama_stack_client/types/telemetry_get_span_response.py b/src/llama_stack_client/types/telemetry_get_span_response.py new file mode 100644 index 00000000..9e50ed0d --- /dev/null +++ b/src/llama_stack_client/types/telemetry_get_span_response.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["TelemetryGetSpanResponse"] + + +class TelemetryGetSpanResponse(BaseModel): + name: str + + span_id: str + + start_time: datetime + + trace_id: str + + attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None + + end_time: Optional[datetime] = None + + parent_span_id: Optional[str] = None diff --git a/src/llama_stack_client/types/telemetry_get_span_tree_params.py b/src/llama_stack_client/types/telemetry_get_span_tree_params.py index 310cf7ef..c0f814a2 100644 --- a/src/llama_stack_client/types/telemetry_get_span_tree_params.py +++ b/src/llama_stack_client/types/telemetry_get_span_tree_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List -from typing_extensions import Required, Annotated, TypedDict +from typing_extensions import Annotated, TypedDict from .._utils import PropertyInfo @@ -11,8 +11,6 @@ class TelemetryGetSpanTreeParams(TypedDict, total=False): - span_id: Required[str] - attributes_to_return: List[str] max_depth: int diff --git a/src/llama_stack_client/types/telemetry_query_spans_response.py b/src/llama_stack_client/types/telemetry_query_spans_response.py index 17062879..c630efeb 100644 --- a/src/llama_stack_client/types/telemetry_query_spans_response.py +++ b/src/llama_stack_client/types/telemetry_query_spans_response.py @@ -2,13 +2,14 @@ from typing import Dict, List, Union, Optional from datetime import datetime +from typing_extensions import TypeAlias from .._models import BaseModel -__all__ = ["TelemetryQuerySpansResponse"] +__all__ = ["TelemetryQuerySpansResponse", "TelemetryQuerySpansResponseItem"] -class TelemetryQuerySpansResponse(BaseModel): +class TelemetryQuerySpansResponseItem(BaseModel): name: str span_id: str @@ -22,3 +23,6 @@ class TelemetryQuerySpansResponse(BaseModel): end_time: Optional[datetime] = None parent_span_id: Optional[str] = None + + +TelemetryQuerySpansResponse: TypeAlias = List[TelemetryQuerySpansResponseItem] diff --git a/src/llama_stack_client/types/telemetry_query_traces_response.py b/src/llama_stack_client/types/telemetry_query_traces_response.py new file mode 100644 index 00000000..01a1365d --- /dev/null +++ b/src/llama_stack_client/types/telemetry_query_traces_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List +from typing_extensions import TypeAlias + +from .trace import Trace + +__all__ = ["TelemetryQueryTracesResponse"] + +TelemetryQueryTracesResponse: TypeAlias = List[Trace] diff --git a/tests/api_resources/eval/test_jobs.py b/tests/api_resources/eval/test_jobs.py index 3b2faa2d..bf5a8865 100644 --- a/tests/api_resources/eval/test_jobs.py +++ b/tests/api_resources/eval/test_jobs.py @@ -64,6 +64,12 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None: @parametrize def test_path_params_retrieve(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + client.eval.jobs.with_raw_response.retrieve( + job_id="job_id", + task_id="", + ) + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): client.eval.jobs.with_raw_response.retrieve( job_id="", @@ -114,6 +120,20 @@ def test_streaming_response_cancel(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_cancel(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + client.eval.jobs.with_raw_response.cancel( + job_id="job_id", + task_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + client.eval.jobs.with_raw_response.cancel( + job_id="", + task_id="task_id", + ) + @parametrize def test_method_status(self, client: LlamaStackClient) -> None: job = client.eval.jobs.status( @@ -160,6 +180,12 @@ def test_streaming_response_status(self, client: LlamaStackClient) -> None: @parametrize def test_path_params_status(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + client.eval.jobs.with_raw_response.status( + job_id="job_id", + task_id="", + ) + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): client.eval.jobs.with_raw_response.status( job_id="", @@ -216,6 +242,12 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl @parametrize async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + await async_client.eval.jobs.with_raw_response.retrieve( + job_id="job_id", + task_id="", + ) + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): await async_client.eval.jobs.with_raw_response.retrieve( job_id="", @@ -266,6 +298,20 @@ async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClie assert cast(Any, response.is_closed) is True + @parametrize + async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + await async_client.eval.jobs.with_raw_response.cancel( + job_id="job_id", + task_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): + await async_client.eval.jobs.with_raw_response.cancel( + job_id="", + task_id="task_id", + ) + @parametrize async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None: job = await async_client.eval.jobs.status( @@ -312,6 +358,12 @@ async def 
test_streaming_response_status(self, async_client: AsyncLlamaStackClie @parametrize async def test_path_params_status(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + await async_client.eval.jobs.with_raw_response.status( + job_id="job_id", + task_id="", + ) + with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"): await async_client.eval.jobs.with_raw_response.status( job_id="", diff --git a/tests/api_resources/test_eval.py b/tests/api_resources/test_eval.py index 35cecf84..1e270798 100644 --- a/tests/api_resources/test_eval.py +++ b/tests/api_resources/test_eval.py @@ -20,6 +20,7 @@ class TestEval: @parametrize def test_method_evaluate_rows(self, client: LlamaStackClient) -> None: eval = client.eval.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -30,13 +31,13 @@ def test_method_evaluate_rows(self, client: LlamaStackClient) -> None: }, "type": "benchmark", }, - task_id="task_id", ) assert_matches_type(EvaluateResponse, eval, path=["response"]) @parametrize def test_method_evaluate_rows_with_all_params(self, client: LlamaStackClient) -> None: eval = client.eval.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -56,7 +57,6 @@ def test_method_evaluate_rows_with_all_params(self, client: LlamaStackClient) -> "type": "benchmark", "num_examples": 0, }, - task_id="task_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -65,6 +65,7 @@ def test_method_evaluate_rows_with_all_params(self, client: LlamaStackClient) -> @parametrize def test_raw_response_evaluate_rows(self, client: LlamaStackClient) -> None: response = client.eval.with_raw_response.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -75,7 +76,6 @@ def test_raw_response_evaluate_rows(self, client: LlamaStackClient) -> None: }, "type": "benchmark", }, - task_id="task_id", ) assert response.is_closed is True @@ -86,6 +86,7 @@ def test_raw_response_evaluate_rows(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_evaluate_rows(self, client: LlamaStackClient) -> None: with client.eval.with_streaming_response.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -96,7 +97,6 @@ def test_streaming_response_evaluate_rows(self, client: LlamaStackClient) -> Non }, "type": "benchmark", }, - task_id="task_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -106,9 +106,27 @@ def test_streaming_response_evaluate_rows(self, client: LlamaStackClient) -> Non assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_evaluate_rows(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + client.eval.with_raw_response.evaluate_rows( + task_id="", + input_rows=[{"foo": True}], + scoring_functions=["string"], + task_config={ + "eval_candidate": { + "model": "model", + "sampling_params": {"strategy": {"type": "greedy"}}, + "type": "model", + }, + "type": "benchmark", + }, + ) + @parametrize def test_method_run_eval(self, client: LlamaStackClient) -> None: eval = client.eval.run_eval( + 
task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -117,13 +135,13 @@ def test_method_run_eval(self, client: LlamaStackClient) -> None: }, "type": "benchmark", }, - task_id="task_id", ) assert_matches_type(Job, eval, path=["response"]) @parametrize def test_method_run_eval_with_all_params(self, client: LlamaStackClient) -> None: eval = client.eval.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -141,7 +159,6 @@ def test_method_run_eval_with_all_params(self, client: LlamaStackClient) -> None "type": "benchmark", "num_examples": 0, }, - task_id="task_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -150,6 +167,7 @@ def test_method_run_eval_with_all_params(self, client: LlamaStackClient) -> None @parametrize def test_raw_response_run_eval(self, client: LlamaStackClient) -> None: response = client.eval.with_raw_response.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -158,7 +176,6 @@ def test_raw_response_run_eval(self, client: LlamaStackClient) -> None: }, "type": "benchmark", }, - task_id="task_id", ) assert response.is_closed is True @@ -169,6 +186,7 @@ def test_raw_response_run_eval(self, client: LlamaStackClient) -> None: @parametrize def test_streaming_response_run_eval(self, client: LlamaStackClient) -> None: with client.eval.with_streaming_response.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -177,7 +195,6 @@ def test_streaming_response_run_eval(self, client: LlamaStackClient) -> None: }, "type": "benchmark", }, - task_id="task_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -187,6 +204,21 @@ def test_streaming_response_run_eval(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_run_eval(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + client.eval.with_raw_response.run_eval( + task_id="", + task_config={ + "eval_candidate": { + "model": "model", + "sampling_params": {"strategy": {"type": "greedy"}}, + "type": "model", + }, + "type": "benchmark", + }, + ) + class TestAsyncEval: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -194,6 +226,7 @@ class TestAsyncEval: @parametrize async def test_method_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None: eval = await async_client.eval.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -204,13 +237,13 @@ async def test_method_evaluate_rows(self, async_client: AsyncLlamaStackClient) - }, "type": "benchmark", }, - task_id="task_id", ) assert_matches_type(EvaluateResponse, eval, path=["response"]) @parametrize async def test_method_evaluate_rows_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: eval = await async_client.eval.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -230,7 +263,6 @@ async def test_method_evaluate_rows_with_all_params(self, async_client: AsyncLla "type": "benchmark", "num_examples": 0, }, - task_id="task_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -239,6 +271,7 @@ async 
def test_method_evaluate_rows_with_all_params(self, async_client: AsyncLla @parametrize async def test_raw_response_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.eval.with_raw_response.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -249,7 +282,6 @@ async def test_raw_response_evaluate_rows(self, async_client: AsyncLlamaStackCli }, "type": "benchmark", }, - task_id="task_id", ) assert response.is_closed is True @@ -260,6 +292,7 @@ async def test_raw_response_evaluate_rows(self, async_client: AsyncLlamaStackCli @parametrize async def test_streaming_response_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.eval.with_streaming_response.evaluate_rows( + task_id="task_id", input_rows=[{"foo": True}], scoring_functions=["string"], task_config={ @@ -270,7 +303,6 @@ async def test_streaming_response_evaluate_rows(self, async_client: AsyncLlamaSt }, "type": "benchmark", }, - task_id="task_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -280,9 +312,27 @@ async def test_streaming_response_evaluate_rows(self, async_client: AsyncLlamaSt assert cast(Any, response.is_closed) is True + @parametrize + async def test_path_params_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + await async_client.eval.with_raw_response.evaluate_rows( + task_id="", + input_rows=[{"foo": True}], + scoring_functions=["string"], + task_config={ + "eval_candidate": { + "model": "model", + "sampling_params": {"strategy": {"type": "greedy"}}, + "type": "model", + }, + "type": "benchmark", + }, + ) + @parametrize async def test_method_run_eval(self, async_client: AsyncLlamaStackClient) -> None: eval = await async_client.eval.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -291,13 +341,13 @@ async def test_method_run_eval(self, async_client: AsyncLlamaStackClient) -> Non }, "type": "benchmark", }, - task_id="task_id", ) assert_matches_type(Job, eval, path=["response"]) @parametrize async def test_method_run_eval_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: eval = await async_client.eval.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -315,7 +365,6 @@ async def test_method_run_eval_with_all_params(self, async_client: AsyncLlamaSta "type": "benchmark", "num_examples": 0, }, - task_id="task_id", x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) @@ -324,6 +373,7 @@ async def test_method_run_eval_with_all_params(self, async_client: AsyncLlamaSta @parametrize async def test_raw_response_run_eval(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.eval.with_raw_response.run_eval( + task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -332,7 +382,6 @@ async def test_raw_response_run_eval(self, async_client: AsyncLlamaStackClient) }, "type": "benchmark", }, - task_id="task_id", ) assert response.is_closed is True @@ -343,6 +392,7 @@ async def test_raw_response_run_eval(self, async_client: AsyncLlamaStackClient) @parametrize async def test_streaming_response_run_eval(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.eval.with_streaming_response.run_eval( + 
task_id="task_id", task_config={ "eval_candidate": { "model": "model", @@ -351,7 +401,6 @@ async def test_streaming_response_run_eval(self, async_client: AsyncLlamaStackCl }, "type": "benchmark", }, - task_id="task_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -360,3 +409,18 @@ async def test_streaming_response_run_eval(self, async_client: AsyncLlamaStackCl assert_matches_type(Job, eval, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_run_eval(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_id` but received ''"): + await async_client.eval.with_raw_response.run_eval( + task_id="", + task_config={ + "eval_candidate": { + "model": "model", + "sampling_params": {"strategy": {"type": "greedy"}}, + "type": "model", + }, + "type": "benchmark", + }, + ) diff --git a/tests/api_resources/test_telemetry.py b/tests/api_resources/test_telemetry.py index 8a43c5ce..bb192330 100644 --- a/tests/api_resources/test_telemetry.py +++ b/tests/api_resources/test_telemetry.py @@ -11,8 +11,10 @@ from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient from llama_stack_client.types import ( Trace, + TelemetryGetSpanResponse, TelemetryQuerySpansResponse, TelemetryGetSpanTreeResponse, + TelemetryQueryTracesResponse, ) from llama_stack_client._utils import parse_datetime @@ -22,6 +24,64 @@ class TestTelemetry: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + @parametrize + def test_method_get_span(self, client: LlamaStackClient) -> None: + telemetry = client.telemetry.get_span( + span_id="span_id", + trace_id="trace_id", + ) + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + def test_method_get_span_with_all_params(self, client: LlamaStackClient) -> None: + telemetry = client.telemetry.get_span( + span_id="span_id", + trace_id="trace_id", + x_llama_stack_client_version="X-LlamaStack-Client-Version", + x_llama_stack_provider_data="X-LlamaStack-Provider-Data", + ) + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + def test_raw_response_get_span(self, client: LlamaStackClient) -> None: + response = client.telemetry.with_raw_response.get_span( + span_id="span_id", + trace_id="trace_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + telemetry = response.parse() + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + def test_streaming_response_get_span(self, client: LlamaStackClient) -> None: + with client.telemetry.with_streaming_response.get_span( + span_id="span_id", + trace_id="trace_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + telemetry = response.parse() + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_get_span(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"): + client.telemetry.with_raw_response.get_span( + span_id="span_id", + trace_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but 
received ''"): + client.telemetry.with_raw_response.get_span( + span_id="", + trace_id="trace_id", + ) + @parametrize def test_method_get_span_tree(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.get_span_tree( @@ -64,6 +124,60 @@ def test_streaming_response_get_span_tree(self, client: LlamaStackClient) -> Non assert cast(Any, response.is_closed) is True + @parametrize + def test_path_params_get_span_tree(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"): + client.telemetry.with_raw_response.get_span_tree( + span_id="", + ) + + @parametrize + def test_method_get_trace(self, client: LlamaStackClient) -> None: + telemetry = client.telemetry.get_trace( + trace_id="trace_id", + ) + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + def test_method_get_trace_with_all_params(self, client: LlamaStackClient) -> None: + telemetry = client.telemetry.get_trace( + trace_id="trace_id", + x_llama_stack_client_version="X-LlamaStack-Client-Version", + x_llama_stack_provider_data="X-LlamaStack-Provider-Data", + ) + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + def test_raw_response_get_trace(self, client: LlamaStackClient) -> None: + response = client.telemetry.with_raw_response.get_trace( + trace_id="trace_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + telemetry = response.parse() + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + def test_streaming_response_get_trace(self, client: LlamaStackClient) -> None: + with client.telemetry.with_streaming_response.get_trace( + trace_id="trace_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + telemetry = response.parse() + assert_matches_type(Trace, telemetry, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_get_trace(self, client: LlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"): + client.telemetry.with_raw_response.get_trace( + trace_id="", + ) + @parametrize def test_method_log_event(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.log_event( @@ -137,9 +251,6 @@ def test_streaming_response_log_event(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_query_spans(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.query_spans( @@ -154,9 +265,6 @@ def test_method_query_spans(self, client: LlamaStackClient) -> None: ) assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_query_spans_with_all_params(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.query_spans( @@ -174,9 +282,6 @@ def test_method_query_spans_with_all_params(self, client: LlamaStackClient) -> N ) assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type 
application/jsonl, Prism mock server will fail" - ) @parametrize def test_raw_response_query_spans(self, client: LlamaStackClient) -> None: response = client.telemetry.with_raw_response.query_spans( @@ -195,9 +300,6 @@ def test_raw_response_query_spans(self, client: LlamaStackClient) -> None: telemetry = response.parse() assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_streaming_response_query_spans(self, client: LlamaStackClient) -> None: with client.telemetry.with_streaming_response.query_spans( @@ -218,17 +320,11 @@ def test_streaming_response_query_spans(self, client: LlamaStackClient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_query_traces(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.query_traces() - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_method_query_traces_with_all_params(self, client: LlamaStackClient) -> None: telemetry = client.telemetry.query_traces( @@ -245,11 +341,8 @@ def test_method_query_traces_with_all_params(self, client: LlamaStackClient) -> x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_raw_response_query_traces(self, client: LlamaStackClient) -> None: response = client.telemetry.with_raw_response.query_traces() @@ -257,11 +350,8 @@ def test_raw_response_query_traces(self, client: LlamaStackClient) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" telemetry = response.parse() - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize def test_streaming_response_query_traces(self, client: LlamaStackClient) -> None: with client.telemetry.with_streaming_response.query_traces() as response: @@ -269,7 +359,7 @@ def test_streaming_response_query_traces(self, client: LlamaStackClient) -> None assert response.http_request.headers.get("X-Stainless-Lang") == "python" telemetry = response.parse() - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) assert cast(Any, response.is_closed) is True @@ -350,6 +440,64 @@ def test_streaming_response_save_spans_to_dataset(self, client: LlamaStackClient class TestAsyncTelemetry: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + @parametrize + async def test_method_get_span(self, 
async_client: AsyncLlamaStackClient) -> None: + telemetry = await async_client.telemetry.get_span( + span_id="span_id", + trace_id="trace_id", + ) + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + async def test_method_get_span_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: + telemetry = await async_client.telemetry.get_span( + span_id="span_id", + trace_id="trace_id", + x_llama_stack_client_version="X-LlamaStack-Client-Version", + x_llama_stack_provider_data="X-LlamaStack-Provider-Data", + ) + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + async def test_raw_response_get_span(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.telemetry.with_raw_response.get_span( + span_id="span_id", + trace_id="trace_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + telemetry = await response.parse() + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + @parametrize + async def test_streaming_response_get_span(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.telemetry.with_streaming_response.get_span( + span_id="span_id", + trace_id="trace_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + telemetry = await response.parse() + assert_matches_type(TelemetryGetSpanResponse, telemetry, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_get_span(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"): + await async_client.telemetry.with_raw_response.get_span( + span_id="span_id", + trace_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"): + await async_client.telemetry.with_raw_response.get_span( + span_id="", + trace_id="trace_id", + ) + @parametrize async def test_method_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.get_span_tree( @@ -392,6 +540,60 @@ async def test_streaming_response_get_span_tree(self, async_client: AsyncLlamaSt assert cast(Any, response.is_closed) is True + @parametrize + async def test_path_params_get_span_tree(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `span_id` but received ''"): + await async_client.telemetry.with_raw_response.get_span_tree( + span_id="", + ) + + @parametrize + async def test_method_get_trace(self, async_client: AsyncLlamaStackClient) -> None: + telemetry = await async_client.telemetry.get_trace( + trace_id="trace_id", + ) + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + async def test_method_get_trace_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: + telemetry = await async_client.telemetry.get_trace( + trace_id="trace_id", + x_llama_stack_client_version="X-LlamaStack-Client-Version", + x_llama_stack_provider_data="X-LlamaStack-Provider-Data", + ) + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + async def test_raw_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None: + response = await async_client.telemetry.with_raw_response.get_trace( + trace_id="trace_id", + 
) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + telemetry = await response.parse() + assert_matches_type(Trace, telemetry, path=["response"]) + + @parametrize + async def test_streaming_response_get_trace(self, async_client: AsyncLlamaStackClient) -> None: + async with async_client.telemetry.with_streaming_response.get_trace( + trace_id="trace_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + telemetry = await response.parse() + assert_matches_type(Trace, telemetry, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_get_trace(self, async_client: AsyncLlamaStackClient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `trace_id` but received ''"): + await async_client.telemetry.with_raw_response.get_trace( + trace_id="", + ) + @parametrize async def test_method_log_event(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.log_event( @@ -465,9 +667,6 @@ async def test_streaming_response_log_event(self, async_client: AsyncLlamaStackC assert cast(Any, response.is_closed) is True - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_query_spans(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.query_spans( @@ -482,9 +681,6 @@ async def test_method_query_spans(self, async_client: AsyncLlamaStackClient) -> ) assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_query_spans_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.query_spans( @@ -502,9 +698,6 @@ async def test_method_query_spans_with_all_params(self, async_client: AsyncLlama ) assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_raw_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.telemetry.with_raw_response.query_spans( @@ -523,9 +716,6 @@ async def test_raw_response_query_spans(self, async_client: AsyncLlamaStackClien telemetry = await response.parse() assert_matches_type(TelemetryQuerySpansResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_streaming_response_query_spans(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.telemetry.with_streaming_response.query_spans( @@ -546,17 +736,11 @@ async def test_streaming_response_query_spans(self, async_client: AsyncLlamaStac assert cast(Any, response.is_closed) is True - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_query_traces(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.query_traces() - 
assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_method_query_traces_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: telemetry = await async_client.telemetry.query_traces( @@ -573,11 +757,8 @@ async def test_method_query_traces_with_all_params(self, async_client: AsyncLlam x_llama_stack_client_version="X-LlamaStack-Client-Version", x_llama_stack_provider_data="X-LlamaStack-Provider-Data", ) - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_raw_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.telemetry.with_raw_response.query_traces() @@ -585,11 +766,8 @@ async def test_raw_response_query_traces(self, async_client: AsyncLlamaStackClie assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" telemetry = await response.parse() - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) - @pytest.mark.skip( - reason="currently no good way to test endpoints with content type application/jsonl, Prism mock server will fail" - ) @parametrize async def test_streaming_response_query_traces(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.telemetry.with_streaming_response.query_traces() as response: @@ -597,7 +775,7 @@ async def test_streaming_response_query_traces(self, async_client: AsyncLlamaSta assert response.http_request.headers.get("X-Stainless-Lang") == "python" telemetry = await response.parse() - assert_matches_type(Trace, telemetry, path=["response"]) + assert_matches_type(TelemetryQueryTracesResponse, telemetry, path=["response"]) assert cast(Any, response.is_closed) is True
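
Reviewer notes (not part of the diff):

The telemetry read path changes both the verb and the response shape: query_traces() moves from POST /v1/telemetry/query-traces to GET /v1/telemetry/traces, its filters become query parameters, and the return type becomes a list of Trace objects unwrapped from the server's data envelope by the DataWrapper post-parser; get_trace() and get_span() are new GETs keyed by path parameters. A minimal sketch of the new call pattern, assuming a distribution running at http://localhost:8321 (address illustrative) and the Trace model's existing root_span_id field:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")

    # GET /v1/telemetry/traces; the {"data": [...]} envelope is stripped by
    # the DataWrapper post-parser, so this is a plain list of Trace objects.
    # The order_by value is illustrative.
    traces = client.telemetry.query_traces(limit=10, order_by=["start_time"])
    for trace in traces:
        print(trace.trace_id, trace.start_time)

    # The new single-object getters take identifiers as path parameters and
    # now raise a client-side ValueError when handed an empty string.
    if traces:
        trace = client.telemetry.get_trace(trace_id=traces[0].trace_id)
        span = client.telemetry.get_span(
            trace_id=trace.trace_id,
            span_id=trace.root_span_id,
        )
        print(span.name, span.start_time, span.parent_span_id)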
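
The eval changes move task_id out of the request body (it is dropped from EvalEvaluateRowsParams and EvalRunEvalParams) and into the URL path, which is why the new test_path_params_* cases assert a ValueError on empty strings. A sketch of the resulting call shape, with "my-task" as a placeholder identifier and the benchmark task_config copied from the tests above:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # address illustrative

    task_config = {
        "type": "benchmark",
        "eval_candidate": {
            "type": "model",
            "model": "model",
            "sampling_params": {"strategy": {"type": "greedy"}},
        },
    }

    # task_id is now a path parameter on the method, not a body field.
    job = client.eval.run_eval(task_id="my-task", task_config=task_config)

    # The jobs sub-resource requires both identifiers; empty strings fail
    # fast client-side instead of producing a malformed URL.
    status = client.eval.jobs.status(job_id=job.job_id, task_id="my-task")
    print(status)
    client.eval.jobs.cancel(job_id=job.job_id, task_id="my-task")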
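
Two response types also flatten from wrappers to plain lists: RouteListResponse goes from Dict[str, List[RouteInfo]] to List[RouteInfo], and TelemetryQuerySpansResponse becomes a TypeAlias for a list of per-span items. Callers that previously indexed into a dict or treated the span response as a single object should iterate instead; a sketch, assuming RouteInfo keeps its method/route/provider_types fields and that routes are read via routes.list():

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # address illustrative

    # routes.list() now returns a flat List[RouteInfo] rather than a dict
    # keyed by API group.
    for route in client.routes.list():
        print(route.method, route.route, route.provider_types)

    # query_spans() likewise yields a list; each item mirrors the fields of
    # the new TelemetryGetSpanResponse model. Filter values are illustrative.
    spans = client.telemetry.query_spans(
        attribute_filters=[{"key": "key", "op": "eq", "value": True}],
        attributes_to_return=["string"],
    )
    for span in spans:
        print(span.span_id, span.name, span.end_time)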