8 changes: 7 additions & 1 deletion src/llama_stack_client/_response.py
@@ -210,7 +210,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
return cast(R, response)

if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
if (
inspect.isclass(
origin # pyright: ignore[reportUnknownArgumentType]
)
and not issubclass(origin, BaseModel)
and issubclass(origin, pydantic.BaseModel)
):
raise TypeError(
"Pydantic models must subclass our base model type, e.g. `from llama_stack_client import BaseModel`"
)
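
The reformatted guard keeps its original behavior: response types that subclass `pydantic.BaseModel` directly, rather than the SDK's own `BaseModel`, are rejected when passed as `cast_to`. A minimal sketch of the distinction it enforces (model names are hypothetical):

import pydantic
from llama_stack_client import BaseModel

class PlainModel(pydantic.BaseModel):  # subclasses pydantic directly; _parse raises TypeError for this
    name: str

class SdkModel(BaseModel):  # subclasses the SDK's BaseModel, so it is accepted as a cast_to target
    name: str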
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/__init__.py
@@ -46,34 +46,44 @@
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .completion_response import CompletionResponse as CompletionResponse
from .embeddings_response import EmbeddingsResponse as EmbeddingsResponse
from .list_tools_response import ListToolsResponse as ListToolsResponse
from .memory_query_params import MemoryQueryParams as MemoryQueryParams
from .model_list_response import ModelListResponse as ModelListResponse
from .route_list_response import RouteListResponse as RouteListResponse
from .run_shield_response import RunShieldResponse as RunShieldResponse
from .tool_execution_step import ToolExecutionStep as ToolExecutionStep
from .eval_run_eval_params import EvalRunEvalParams as EvalRunEvalParams
from .list_models_response import ListModelsResponse as ListModelsResponse
from .list_routes_response import ListRoutesResponse as ListRoutesResponse
from .memory_insert_params import MemoryInsertParams as MemoryInsertParams
from .query_spans_response import QuerySpansResponse as QuerySpansResponse
from .scoring_score_params import ScoringScoreParams as ScoringScoreParams
from .shield_list_response import ShieldListResponse as ShieldListResponse
from .agent_create_response import AgentCreateResponse as AgentCreateResponse
from .dataset_list_response import DatasetListResponse as DatasetListResponse
from .list_shields_response import ListShieldsResponse as ListShieldsResponse
from .memory_retrieval_step import MemoryRetrievalStep as MemoryRetrievalStep
from .model_register_params import ModelRegisterParams as ModelRegisterParams
from .paginated_rows_result import PaginatedRowsResult as PaginatedRowsResult
from .list_datasets_response import ListDatasetsResponse as ListDatasetsResponse
from .provider_list_response import ProviderListResponse as ProviderListResponse
from .scoring_score_response import ScoringScoreResponse as ScoringScoreResponse
from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams
from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult
from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams
from .eval_task_list_response import EvalTaskListResponse as EvalTaskListResponse
from .list_providers_response import ListProvidersResponse as ListProvidersResponse
from .toolgroup_list_response import ToolgroupListResponse as ToolgroupListResponse
from .list_eval_tasks_response import ListEvalTasksResponse as ListEvalTasksResponse
from .query_documents_response import QueryDocumentsResponse as QueryDocumentsResponse
from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams
from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse
from .eval_evaluate_rows_params import EvalEvaluateRowsParams as EvalEvaluateRowsParams
from .eval_task_register_params import EvalTaskRegisterParams as EvalTaskRegisterParams
from .list_tool_groups_response import ListToolGroupsResponse as ListToolGroupsResponse
from .memory_bank_list_response import MemoryBankListResponse as MemoryBankListResponse
from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams
from .list_memory_banks_response import ListMemoryBanksResponse as ListMemoryBanksResponse
from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams
from .telemetry_log_event_params import TelemetryLogEventParams as TelemetryLogEventParams
from .inference_completion_params import InferenceCompletionParams as InferenceCompletionParams
@@ -91,9 +101,11 @@
from .telemetry_get_span_tree_params import TelemetryGetSpanTreeParams as TelemetryGetSpanTreeParams
from .telemetry_query_spans_response import TelemetryQuerySpansResponse as TelemetryQuerySpansResponse
from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams
from .list_scoring_functions_response import ListScoringFunctionsResponse as ListScoringFunctionsResponse
from .telemetry_query_traces_response import TelemetryQueryTracesResponse as TelemetryQueryTracesResponse
from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams
from .inference_chat_completion_params import InferenceChatCompletionParams as InferenceChatCompletionParams
from .list_post_training_jobs_response import ListPostTrainingJobsResponse as ListPostTrainingJobsResponse
from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
from .telemetry_get_span_tree_response import TelemetryGetSpanTreeResponse as TelemetryGetSpanTreeResponse
from .batch_inference_completion_params import BatchInferenceCompletionParams as BatchInferenceCompletionParams
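
The added names are re-exported at the package level, so they can be imported straight from `llama_stack_client.types`, for example:

from llama_stack_client.types import (
    ListModelsResponse,
    ListShieldsResponse,
    ListMemoryBanksResponse,
)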
30 changes: 30 additions & 0 deletions src/llama_stack_client/types/list_datasets_response.py
@@ -0,0 +1,30 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union
from typing_extensions import Literal

from .._models import BaseModel
from .shared.url import URL
from .shared.param_type import ParamType

__all__ = ["ListDatasetsResponse", "Data"]


class Data(BaseModel):
dataset_schema: Dict[str, ParamType]

identifier: str

metadata: Dict[str, Union[bool, float, str, List[object], object, None]]

provider_id: str

provider_resource_id: str

type: Literal["dataset"]

url: URL


class ListDatasetsResponse(BaseModel):
data: List[Data]
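
Because `Data` exposes plain typed attributes, downstream code can work with a parsed response directly; a small, hypothetical helper:

from typing import List

def dataset_identifiers(resp: ListDatasetsResponse) -> List[str]:
    # Collect the identifier of every dataset in the listing.
    return [item.identifier for item in resp.data]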
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_eval_tasks_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel
from .eval_task import EvalTask

__all__ = ["ListEvalTasksResponse"]


class ListEvalTasksResponse(BaseModel):
data: List[EvalTask]
78 changes: 78 additions & 0 deletions src/llama_stack_client/types/list_memory_banks_response.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias

from .._models import BaseModel

__all__ = [
"ListMemoryBanksResponse",
"Data",
"DataVectorMemoryBank",
"DataKeyValueMemoryBank",
"DataKeywordMemoryBank",
"DataGraphMemoryBank",
]


class DataVectorMemoryBank(BaseModel):
chunk_size_in_tokens: int

embedding_model: str

identifier: str

memory_bank_type: Literal["vector"]

provider_id: str

provider_resource_id: str

type: Literal["memory_bank"]

embedding_dimension: Optional[int] = None

overlap_size_in_tokens: Optional[int] = None


class DataKeyValueMemoryBank(BaseModel):
identifier: str

memory_bank_type: Literal["keyvalue"]

provider_id: str

provider_resource_id: str

type: Literal["memory_bank"]


class DataKeywordMemoryBank(BaseModel):
identifier: str

memory_bank_type: Literal["keyword"]

provider_id: str

provider_resource_id: str

type: Literal["memory_bank"]


class DataGraphMemoryBank(BaseModel):
identifier: str

memory_bank_type: Literal["graph"]

provider_id: str

provider_resource_id: str

type: Literal["memory_bank"]


Data: TypeAlias = Union[DataVectorMemoryBank, DataKeyValueMemoryBank, DataKeywordMemoryBank, DataGraphMemoryBank]


class ListMemoryBanksResponse(BaseModel):
data: List[Data]
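
`Data` is a plain `Union` whose members are distinguished by their `memory_bank_type` literal, so individual members can be constructed or inspected directly; an illustrative sketch with made-up values:

bank = DataVectorMemoryBank(
    chunk_size_in_tokens=512,
    embedding_model="all-MiniLM-L6-v2",
    identifier="docs-bank",
    memory_bank_type="vector",
    provider_id="inline::faiss",
    provider_resource_id="docs-bank",
    type="memory_bank",
)
resp = ListMemoryBanksResponse(data=[bank])
assert resp.data[0].memory_bank_type == "vector"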
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_models_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .model import Model
from .._models import BaseModel

__all__ = ["ListModelsResponse"]


class ListModelsResponse(BaseModel):
data: List[Model]
15 changes: 15 additions & 0 deletions src/llama_stack_client/types/list_post_training_jobs_response.py
@@ -0,0 +1,15 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel

__all__ = ["ListPostTrainingJobsResponse", "Data"]


class Data(BaseModel):
job_uuid: str


class ListPostTrainingJobsResponse(BaseModel):
data: List[Data]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_providers_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel
from .provider_info import ProviderInfo

__all__ = ["ListProvidersResponse"]


class ListProvidersResponse(BaseModel):
data: List[ProviderInfo]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_routes_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel
from .route_info import RouteInfo

__all__ = ["ListRoutesResponse"]


class ListRoutesResponse(BaseModel):
data: List[RouteInfo]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_scoring_functions_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel
from .scoring_fn import ScoringFn

__all__ = ["ListScoringFunctionsResponse"]


class ListScoringFunctionsResponse(BaseModel):
data: List[ScoringFn]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_shields_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .shield import Shield
from .._models import BaseModel

__all__ = ["ListShieldsResponse"]


class ListShieldsResponse(BaseModel):
data: List[Shield]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_tool_groups_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .._models import BaseModel
from .tool_group import ToolGroup

__all__ = ["ListToolGroupsResponse"]


class ListToolGroupsResponse(BaseModel):
data: List[ToolGroup]
12 changes: 12 additions & 0 deletions src/llama_stack_client/types/list_tools_response.py
@@ -0,0 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List

from .tool import Tool
from .._models import BaseModel

__all__ = ["ListToolsResponse"]


class ListToolsResponse(BaseModel):
data: List[Tool]
28 changes: 28 additions & 0 deletions src/llama_stack_client/types/query_spans_response.py
@@ -0,0 +1,28 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime

from .._models import BaseModel

__all__ = ["QuerySpansResponse", "Data"]


class Data(BaseModel):
name: str

span_id: str

start_time: datetime

trace_id: str

attributes: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None

end_time: Optional[datetime] = None

parent_span_id: Optional[str] = None


class QuerySpansResponse(BaseModel):
data: List[Data]
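
Each span carries `trace_id` and an optional `parent_span_id`, so a parsed response can be regrouped per trace; a hypothetical helper:

from collections import defaultdict
from typing import Dict, List

def spans_by_trace(resp: QuerySpansResponse) -> Dict[str, List[Data]]:
    # Bucket every returned span under the trace it belongs to.
    grouped: Dict[str, List[Data]] = defaultdict(list)
    for span in resp.data:
        grouped[span.trace_id].append(span)
    return dict(grouped)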