From de833a31e151209117ad1930a552a8717ad1cec3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 21:05:01 +0000 Subject: [PATCH] feat(api): vector_db_id -> vector_store_id --- .stats.yml | 4 +- .../resources/moderations.py | 10 +- .../resources/tool_runtime/rag_tool.py | 24 +- src/llama_stack_client/resources/vector_io.py | 24 +- .../types/alpha/memory_retrieval_step.py | 2 +- .../types/moderation_create_params.py | 4 +- .../types/response_create_params.py | 325 +++++++++--------- .../types/response_list_response.py | 325 +++++++++--------- .../responses/input_item_list_response.py | 325 +++++++++--------- .../tool_runtime/rag_tool_insert_params.py | 2 +- .../tool_runtime/rag_tool_query_params.py | 2 +- .../types/vector_io_insert_params.py | 2 +- .../types/vector_io_query_params.py | 2 +- tests/api_resources/test_moderations.py | 18 +- tests/api_resources/test_vector_io.py | 32 +- .../tool_runtime/test_rag_tool.py | 28 +- 16 files changed, 571 insertions(+), 558 deletions(-) diff --git a/.stats.yml b/.stats.yml index aadaf6e2..49885bb5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 104 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-97b91eca4a3ff251edc02636b1a638866675d6c1abd46cd9fc18bc50a1de9656.yml -openapi_spec_hash: 7302f1aa50090e3de78e34c184371267 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-35c6569e5e9fcc85084c9728eb7fc7c5908297fcc77043d621d25de3c850a990.yml +openapi_spec_hash: 0f95bbeee16f3205d36ec34cfa62c711 config_hash: a3829dbdaa491194d01f399784d532cd diff --git a/src/llama_stack_client/resources/moderations.py b/src/llama_stack_client/resources/moderations.py index 420641ac..aaa03690 100644 --- a/src/llama_stack_client/resources/moderations.py +++ b/src/llama_stack_client/resources/moderations.py @@ -13,7 +13,7 @@ import httpx from ..types import moderation_create_params -from .._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given +from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -53,7 +53,7 @@ def create( self, *, input: Union[str, SequenceNotStr[str]], - model: str, + model: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -70,7 +70,7 @@ def create( input: Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models. - model: The content moderation model you would like to use. + model: (Optional) The content moderation model you would like to use. extra_headers: Send extra headers @@ -120,7 +120,7 @@ async def create( self, *, input: Union[str, SequenceNotStr[str]], - model: str, + model: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -137,7 +137,7 @@ async def create( input: Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models. - model: The content moderation model you would like to use. + model: (Optional) The content moderation model you would like to use. extra_headers: Send extra headers diff --git a/src/llama_stack_client/resources/tool_runtime/rag_tool.py b/src/llama_stack_client/resources/tool_runtime/rag_tool.py index af4a7d64..7db478f7 100644 --- a/src/llama_stack_client/resources/tool_runtime/rag_tool.py +++ b/src/llama_stack_client/resources/tool_runtime/rag_tool.py @@ -57,7 +57,7 @@ def insert( *, chunk_size_in_tokens: int, documents: Iterable[Document], - vector_db_id: str, + vector_store_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -73,7 +73,7 @@ def insert( documents: List of documents to index in the RAG system - vector_db_id: ID of the vector database to store the document embeddings + vector_store_id: ID of the vector store in which to store the document embeddings extra_headers: Send extra headers @@ -90,7 +90,7 @@ def insert( { "chunk_size_in_tokens": chunk_size_in_tokens, "documents": documents, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, }, rag_tool_insert_params.RagToolInsertParams, ), @@ -104,7 +104,7 @@ def query( self, *, content: InterleavedContent, - vector_db_ids: SequenceNotStr[str], + vector_store_ids: SequenceNotStr[str], query_config: QueryConfig | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -119,7 +119,7 @@ def query( Args: content: The query content to search for in the indexed documents - vector_db_ids: List of vector database IDs to search within + vector_store_ids: List of vector store IDs to search within query_config: (Optional) Configuration parameters for the query operation @@ -136,7 +136,7 @@ def query( body=maybe_transform( { "content": content, - "vector_db_ids": vector_db_ids, + "vector_store_ids": vector_store_ids, "query_config": query_config, }, rag_tool_query_params.RagToolQueryParams, @@ -173,7 +173,7 @@ async def insert( *, chunk_size_in_tokens: int, documents: Iterable[Document], - vector_db_id: str, + vector_store_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -189,7 +189,7 @@ async def insert( documents: List of documents to index in the RAG system - vector_db_id: ID of the vector database to store the document embeddings + vector_store_id: ID of the vector store in which to store the document embeddings extra_headers: Send extra headers @@ -206,7 +206,7 @@ async def insert( { "chunk_size_in_tokens": chunk_size_in_tokens, "documents": documents, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, }, rag_tool_insert_params.RagToolInsertParams, ), @@ -220,7 +220,7 @@ async def query( self, *, content: InterleavedContent, - vector_db_ids: SequenceNotStr[str], + vector_store_ids: SequenceNotStr[str], query_config: QueryConfig | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -235,7 +235,7 @@ async def query( Args: content: The query content to search for in the indexed documents - vector_db_ids: List of vector database IDs to search within + vector_store_ids: List of vector store IDs to search within query_config: (Optional) Configuration parameters for the query operation @@ -252,7 +252,7 @@ async def query( body=await async_maybe_transform( { "content": content, - "vector_db_ids": vector_db_ids, + "vector_store_ids": vector_store_ids, "query_config": query_config, }, rag_tool_query_params.RagToolQueryParams, diff --git a/src/llama_stack_client/resources/vector_io.py b/src/llama_stack_client/resources/vector_io.py index 2659c139..dda04f33 100644 --- a/src/llama_stack_client/resources/vector_io.py +++ b/src/llama_stack_client/resources/vector_io.py @@ -54,7 +54,7 @@ def insert( self, *, chunks: Iterable[vector_io_insert_params.Chunk], - vector_db_id: str, + vector_store_id: str, ttl_seconds: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -74,7 +74,7 @@ def insert( configure how Llama Stack formats the chunk during generation. If `embedding` is not provided, it will be computed later. - vector_db_id: The identifier of the vector database to insert the chunks into. + vector_store_id: The identifier of the vector store to insert the chunks into. ttl_seconds: The time to live of the chunks. @@ -92,7 +92,7 @@ def insert( body=maybe_transform( { "chunks": chunks, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, "ttl_seconds": ttl_seconds, }, vector_io_insert_params.VectorIoInsertParams, @@ -107,7 +107,7 @@ def query( self, *, query: InterleavedContent, - vector_db_id: str, + vector_store_id: str, params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -122,7 +122,7 @@ def query( Args: query: The query to search for. - vector_db_id: The identifier of the vector database to query. + vector_store_id: The identifier of the vector store to query. params: The parameters of the query. 
@@ -139,7 +139,7 @@ def query( body=maybe_transform( { "query": query, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, "params": params, }, vector_io_query_params.VectorIoQueryParams, @@ -175,7 +175,7 @@ async def insert( self, *, chunks: Iterable[vector_io_insert_params.Chunk], - vector_db_id: str, + vector_store_id: str, ttl_seconds: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -195,7 +195,7 @@ async def insert( configure how Llama Stack formats the chunk during generation. If `embedding` is not provided, it will be computed later. - vector_db_id: The identifier of the vector database to insert the chunks into. + vector_store_id: The identifier of the vector store to insert the chunks into. ttl_seconds: The time to live of the chunks. @@ -213,7 +213,7 @@ async def insert( body=await async_maybe_transform( { "chunks": chunks, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, "ttl_seconds": ttl_seconds, }, vector_io_insert_params.VectorIoInsertParams, @@ -228,7 +228,7 @@ async def query( self, *, query: InterleavedContent, - vector_db_id: str, + vector_store_id: str, params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -243,7 +243,7 @@ async def query( Args: query: The query to search for. - vector_db_id: The identifier of the vector database to query. + vector_store_id: The identifier of the vector store to query. params: The parameters of the query. @@ -260,7 +260,7 @@ async def query( body=await async_maybe_transform( { "query": query, - "vector_db_id": vector_db_id, + "vector_store_id": vector_store_id, "params": params, }, vector_io_query_params.VectorIoQueryParams, diff --git a/src/llama_stack_client/types/alpha/memory_retrieval_step.py b/src/llama_stack_client/types/alpha/memory_retrieval_step.py index 727c0ec0..1b5708ce 100644 --- a/src/llama_stack_client/types/alpha/memory_retrieval_step.py +++ b/src/llama_stack_client/types/alpha/memory_retrieval_step.py @@ -29,7 +29,7 @@ class MemoryRetrievalStep(BaseModel): turn_id: str """The ID of the turn.""" - vector_db_ids: str + vector_store_ids: str """The IDs of the vector databases to retrieve context from.""" completed_at: Optional[datetime] = None diff --git a/src/llama_stack_client/types/moderation_create_params.py b/src/llama_stack_client/types/moderation_create_params.py index ac0933cf..ed27fe37 100644 --- a/src/llama_stack_client/types/moderation_create_params.py +++ b/src/llama_stack_client/types/moderation_create_params.py @@ -24,5 +24,5 @@ class ModerationCreateParams(TypedDict, total=False): objects similar to other models.
""" - model: Required[str] - """The content moderation model you would like to use.""" + model: str + """(Optional) The content moderation model you would like to use.""" diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py index 87c8ab68..c8b48657 100644 --- a/src/llama_stack_client/types/response_create_params.py +++ b/src/llama_stack_client/types/response_create_params.py @@ -16,16 +16,6 @@ __all__ = [ "ResponseCreateParamsBase", "InputUnionMember1", - "InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall", - "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall", - "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult", - "InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall", - "InputUnionMember1OpenAIResponseInputFunctionToolCallOutput", - "InputUnionMember1OpenAIResponseMcpApprovalRequest", - "InputUnionMember1OpenAIResponseMcpApprovalResponse", - "InputUnionMember1OpenAIResponseOutputMessageMcpCall", - "InputUnionMember1OpenAIResponseOutputMessageMcpListTools", - "InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool", "InputUnionMember1OpenAIResponseMessage", "InputUnionMember1OpenAIResponseMessageContentUnionMember1", "InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", @@ -38,6 +28,16 @@ "InputUnionMember1OpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation", "InputUnionMember1OpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath", "InputUnionMember1OpenAIResponseMessageContentUnionMember2OpenAIResponseContentPartRefusal", + "InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall", + "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall", + "InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult", + "InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall", + "InputUnionMember1OpenAIResponseOutputMessageMcpCall", + "InputUnionMember1OpenAIResponseOutputMessageMcpListTools", + "InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool", + "InputUnionMember1OpenAIResponseMcpApprovalRequest", + "InputUnionMember1OpenAIResponseInputFunctionToolCallOutput", + "InputUnionMember1OpenAIResponseMcpApprovalResponse", "Text", "TextFormat", "Tool", @@ -93,155 +93,6 @@ class ResponseCreateParamsBase(TypedDict, total=False): tools: Iterable[Tool] -class InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False): - id: Required[str] - """Unique identifier for this tool call""" - - status: Required[str] - """Current status of the web search operation""" - - type: Required[Literal["web_search_call"]] - """Tool call type identifier, always "web_search_call" """ - - -class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False): - attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - """(Optional) Key-value attributes associated with the file""" - - file_id: Required[str] - """Unique identifier of the file containing the result""" - - filename: Required[str] - """Name of the file containing the result""" - - score: Required[float] - """Relevance score for this search result (between 0 and 1)""" - - text: Required[str] - """Text content of the search result""" - - -class 
InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False): - id: Required[str] - """Unique identifier for this tool call""" - - queries: Required[SequenceNotStr[str]] - """List of search queries executed""" - - status: Required[str] - """Current status of the file search operation""" - - type: Required[Literal["file_search_call"]] - """Tool call type identifier, always "file_search_call" """ - - results: Iterable[InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult] - """(Optional) Search results returned by the file search operation""" - - -class InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False): - arguments: Required[str] - """JSON string containing the function arguments""" - - call_id: Required[str] - """Unique identifier for the function call""" - - name: Required[str] - """Name of the function being called""" - - type: Required[Literal["function_call"]] - """Tool call type identifier, always "function_call" """ - - id: str - """(Optional) Additional identifier for the tool call""" - - status: str - """(Optional) Current status of the function call execution""" - - -class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, total=False): - call_id: Required[str] - - output: Required[str] - - type: Required[Literal["function_call_output"]] - - id: str - - status: str - - -class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False): - id: Required[str] - - arguments: Required[str] - - name: Required[str] - - server_label: Required[str] - - type: Required[Literal["mcp_approval_request"]] - - -class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False): - approval_request_id: Required[str] - - approve: Required[bool] - - type: Required[Literal["mcp_approval_response"]] - - id: str - - reason: str - - -class InputUnionMember1OpenAIResponseOutputMessageMcpCall(TypedDict, total=False): - id: Required[str] - """Unique identifier for this MCP call""" - - arguments: Required[str] - """JSON string containing the MCP call arguments""" - - name: Required[str] - """Name of the MCP method being called""" - - server_label: Required[str] - """Label identifying the MCP server handling the call""" - - type: Required[Literal["mcp_call"]] - """Tool call type identifier, always "mcp_call" """ - - error: str - """(Optional) Error message if the MCP call failed""" - - output: str - """(Optional) Output result from the successful MCP call""" - - -class InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool(TypedDict, total=False): - input_schema: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] - """JSON schema defining the tool's input parameters""" - - name: Required[str] - """Name of the tool""" - - description: str - """(Optional) Description of what the tool does""" - - -class InputUnionMember1OpenAIResponseOutputMessageMcpListTools(TypedDict, total=False): - id: Required[str] - """Unique identifier for this MCP list tools operation""" - - server_label: Required[str] - """Label identifying the MCP server providing the tools""" - - tools: Required[Iterable[InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool]] - """List of available tools provided by the MCP server""" - - type: Required[Literal["mcp_list_tools"]] - """Tool call type identifier, always "mcp_list_tools" """ - - class InputUnionMember1OpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText( TypedDict, total=False ): @@ -386,15 +237,165 @@ 
class InputUnionMember1OpenAIResponseMessage(TypedDict, total=False): status: str +class InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall(TypedDict, total=False): + id: Required[str] + """Unique identifier for this tool call""" + + status: Required[str] + """Current status of the web search operation""" + + type: Required[Literal["web_search_call"]] + """Tool call type identifier, always "web_search_call" """ + + +class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult(TypedDict, total=False): + attributes: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] + """(Optional) Key-value attributes associated with the file""" + + file_id: Required[str] + """Unique identifier of the file containing the result""" + + filename: Required[str] + """Name of the file containing the result""" + + score: Required[float] + """Relevance score for this search result (between 0 and 1)""" + + text: Required[str] + """Text content of the search result""" + + +class InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall(TypedDict, total=False): + id: Required[str] + """Unique identifier for this tool call""" + + queries: Required[SequenceNotStr[str]] + """List of search queries executed""" + + status: Required[str] + """Current status of the file search operation""" + + type: Required[Literal["file_search_call"]] + """Tool call type identifier, always "file_search_call" """ + + results: Iterable[InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCallResult] + """(Optional) Search results returned by the file search operation""" + + +class InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall(TypedDict, total=False): + arguments: Required[str] + """JSON string containing the function arguments""" + + call_id: Required[str] + """Unique identifier for the function call""" + + name: Required[str] + """Name of the function being called""" + + type: Required[Literal["function_call"]] + """Tool call type identifier, always "function_call" """ + + id: str + """(Optional) Additional identifier for the tool call""" + + status: str + """(Optional) Current status of the function call execution""" + + +class InputUnionMember1OpenAIResponseOutputMessageMcpCall(TypedDict, total=False): + id: Required[str] + """Unique identifier for this MCP call""" + + arguments: Required[str] + """JSON string containing the MCP call arguments""" + + name: Required[str] + """Name of the MCP method being called""" + + server_label: Required[str] + """Label identifying the MCP server handling the call""" + + type: Required[Literal["mcp_call"]] + """Tool call type identifier, always "mcp_call" """ + + error: str + """(Optional) Error message if the MCP call failed""" + + output: str + """(Optional) Output result from the successful MCP call""" + + +class InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool(TypedDict, total=False): + input_schema: Required[Dict[str, Union[bool, float, str, Iterable[object], object, None]]] + """JSON schema defining the tool's input parameters""" + + name: Required[str] + """Name of the tool""" + + description: str + """(Optional) Description of what the tool does""" + + +class InputUnionMember1OpenAIResponseOutputMessageMcpListTools(TypedDict, total=False): + id: Required[str] + """Unique identifier for this MCP list tools operation""" + + server_label: Required[str] + """Label identifying the MCP server providing the tools""" + + tools: Required[Iterable[InputUnionMember1OpenAIResponseOutputMessageMcpListToolsTool]] + 
"""List of available tools provided by the MCP server""" + + type: Required[Literal["mcp_list_tools"]] + """Tool call type identifier, always "mcp_list_tools" """ + + +class InputUnionMember1OpenAIResponseMcpApprovalRequest(TypedDict, total=False): + id: Required[str] + + arguments: Required[str] + + name: Required[str] + + server_label: Required[str] + + type: Required[Literal["mcp_approval_request"]] + + +class InputUnionMember1OpenAIResponseInputFunctionToolCallOutput(TypedDict, total=False): + call_id: Required[str] + + output: Required[str] + + type: Required[Literal["function_call_output"]] + + id: str + + status: str + + +class InputUnionMember1OpenAIResponseMcpApprovalResponse(TypedDict, total=False): + approval_request_id: Required[str] + + approve: Required[bool] + + type: Required[Literal["mcp_approval_response"]] + + id: str + + reason: str + + InputUnionMember1: TypeAlias = Union[ + InputUnionMember1OpenAIResponseMessage, InputUnionMember1OpenAIResponseOutputMessageWebSearchToolCall, InputUnionMember1OpenAIResponseOutputMessageFileSearchToolCall, InputUnionMember1OpenAIResponseOutputMessageFunctionToolCall, - InputUnionMember1OpenAIResponseInputFunctionToolCallOutput, - InputUnionMember1OpenAIResponseMcpApprovalRequest, - InputUnionMember1OpenAIResponseMcpApprovalResponse, InputUnionMember1OpenAIResponseOutputMessageMcpCall, InputUnionMember1OpenAIResponseOutputMessageMcpListTools, + InputUnionMember1OpenAIResponseMcpApprovalRequest, + InputUnionMember1OpenAIResponseInputFunctionToolCallOutput, + InputUnionMember1OpenAIResponseMcpApprovalResponse, InputUnionMember1OpenAIResponseMessage, ] diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py index 8a091316..78c683b4 100644 --- a/src/llama_stack_client/types/response_list_response.py +++ b/src/llama_stack_client/types/response_list_response.py @@ -17,16 +17,6 @@ __all__ = [ "ResponseListResponse", "Input", - "InputOpenAIResponseOutputMessageWebSearchToolCall", - "InputOpenAIResponseOutputMessageFileSearchToolCall", - "InputOpenAIResponseOutputMessageFileSearchToolCallResult", - "InputOpenAIResponseOutputMessageFunctionToolCall", - "InputOpenAIResponseInputFunctionToolCallOutput", - "InputOpenAIResponseMcpApprovalRequest", - "InputOpenAIResponseMcpApprovalResponse", - "InputOpenAIResponseOutputMessageMcpCall", - "InputOpenAIResponseOutputMessageMcpListTools", - "InputOpenAIResponseOutputMessageMcpListToolsTool", "InputOpenAIResponseMessage", "InputOpenAIResponseMessageContentUnionMember1", "InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", @@ -39,6 +29,16 @@ "InputOpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation", "InputOpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath", "InputOpenAIResponseMessageContentUnionMember2OpenAIResponseContentPartRefusal", + "InputOpenAIResponseOutputMessageWebSearchToolCall", + "InputOpenAIResponseOutputMessageFileSearchToolCall", + "InputOpenAIResponseOutputMessageFileSearchToolCallResult", + "InputOpenAIResponseOutputMessageFunctionToolCall", + "InputOpenAIResponseOutputMessageMcpCall", + "InputOpenAIResponseOutputMessageMcpListTools", + "InputOpenAIResponseOutputMessageMcpListToolsTool", + "InputOpenAIResponseMcpApprovalRequest", + "InputOpenAIResponseInputFunctionToolCallOutput", + "InputOpenAIResponseMcpApprovalResponse", 
"Output", "OutputOpenAIResponseMessage", "OutputOpenAIResponseMessageContentUnionMember1", @@ -77,155 +77,6 @@ ] -class InputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): - id: str - """Unique identifier for this tool call""" - - status: str - """Current status of the web search operation""" - - type: Literal["web_search_call"] - """Tool call type identifier, always "web_search_call" """ - - -class InputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): - attributes: Dict[str, Union[bool, float, str, List[object], object, None]] - """(Optional) Key-value attributes associated with the file""" - - file_id: str - """Unique identifier of the file containing the result""" - - filename: str - """Name of the file containing the result""" - - score: float - """Relevance score for this search result (between 0 and 1)""" - - text: str - """Text content of the search result""" - - -class InputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): - id: str - """Unique identifier for this tool call""" - - queries: List[str] - """List of search queries executed""" - - status: str - """Current status of the file search operation""" - - type: Literal["file_search_call"] - """Tool call type identifier, always "file_search_call" """ - - results: Optional[List[InputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None - """(Optional) Search results returned by the file search operation""" - - -class InputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): - arguments: str - """JSON string containing the function arguments""" - - call_id: str - """Unique identifier for the function call""" - - name: str - """Name of the function being called""" - - type: Literal["function_call"] - """Tool call type identifier, always "function_call" """ - - id: Optional[str] = None - """(Optional) Additional identifier for the tool call""" - - status: Optional[str] = None - """(Optional) Current status of the function call execution""" - - -class InputOpenAIResponseInputFunctionToolCallOutput(BaseModel): - call_id: str - - output: str - - type: Literal["function_call_output"] - - id: Optional[str] = None - - status: Optional[str] = None - - -class InputOpenAIResponseMcpApprovalRequest(BaseModel): - id: str - - arguments: str - - name: str - - server_label: str - - type: Literal["mcp_approval_request"] - - -class InputOpenAIResponseMcpApprovalResponse(BaseModel): - approval_request_id: str - - approve: bool - - type: Literal["mcp_approval_response"] - - id: Optional[str] = None - - reason: Optional[str] = None - - -class InputOpenAIResponseOutputMessageMcpCall(BaseModel): - id: str - """Unique identifier for this MCP call""" - - arguments: str - """JSON string containing the MCP call arguments""" - - name: str - """Name of the MCP method being called""" - - server_label: str - """Label identifying the MCP server handling the call""" - - type: Literal["mcp_call"] - """Tool call type identifier, always "mcp_call" """ - - error: Optional[str] = None - """(Optional) Error message if the MCP call failed""" - - output: Optional[str] = None - """(Optional) Output result from the successful MCP call""" - - -class InputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): - input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] - """JSON schema defining the tool's input parameters""" - - name: str - """Name of the tool""" - - description: Optional[str] = None - """(Optional) Description of what the tool does""" - - -class 
InputOpenAIResponseOutputMessageMcpListTools(BaseModel): - id: str - """Unique identifier for this MCP list tools operation""" - - server_label: str - """Label identifying the MCP server providing the tools""" - - tools: List[InputOpenAIResponseOutputMessageMcpListToolsTool] - """List of available tools provided by the MCP server""" - - type: Literal["mcp_list_tools"] - """Tool call type identifier, always "mcp_list_tools" """ - - class InputOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str """The text content of the input message""" @@ -367,15 +218,165 @@ class InputOpenAIResponseMessage(BaseModel): status: Optional[str] = None +class InputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): + id: str + """Unique identifier for this tool call""" + + status: str + """Current status of the web search operation""" + + type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class InputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" + + +class InputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): + id: str + """Unique identifier for this tool call""" + + queries: List[str] + """List of search queries executed""" + + status: str + """Current status of the file search operation""" + + type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ + + results: Optional[List[InputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" + + +class InputOpenAIResponseOutputMessageFunctionToolCall(BaseModel): + arguments: str + """JSON string containing the function arguments""" + + call_id: str + """Unique identifier for the function call""" + + name: str + """Name of the function being called""" + + type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ + + id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" + + status: Optional[str] = None + """(Optional) Current status of the function call execution""" + + +class InputOpenAIResponseOutputMessageMcpCall(BaseModel): + id: str + """Unique identifier for this MCP call""" + + arguments: str + """JSON string containing the MCP call arguments""" + + name: str + """Name of the MCP method being called""" + + server_label: str + """Label identifying the MCP server handling the call""" + + type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ + + error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" + + output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" + + +class InputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): + input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" + + name: str + """Name of the tool""" + + description: Optional[str] = None + """(Optional) Description of what the tool does""" + + +class InputOpenAIResponseOutputMessageMcpListTools(BaseModel): + id: 
str + """Unique identifier for this MCP list tools operation""" + + server_label: str + """Label identifying the MCP server providing the tools""" + + tools: List[InputOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP server""" + + type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ + + +class InputOpenAIResponseMcpApprovalRequest(BaseModel): + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_approval_request"] + + +class InputOpenAIResponseInputFunctionToolCallOutput(BaseModel): + call_id: str + + output: str + + type: Literal["function_call_output"] + + id: Optional[str] = None + + status: Optional[str] = None + + +class InputOpenAIResponseMcpApprovalResponse(BaseModel): + approval_request_id: str + + approve: bool + + type: Literal["mcp_approval_response"] + + id: Optional[str] = None + + reason: Optional[str] = None + + Input: TypeAlias = Union[ + InputOpenAIResponseMessage, InputOpenAIResponseOutputMessageWebSearchToolCall, InputOpenAIResponseOutputMessageFileSearchToolCall, InputOpenAIResponseOutputMessageFunctionToolCall, - InputOpenAIResponseInputFunctionToolCallOutput, - InputOpenAIResponseMcpApprovalRequest, - InputOpenAIResponseMcpApprovalResponse, InputOpenAIResponseOutputMessageMcpCall, InputOpenAIResponseOutputMessageMcpListTools, + InputOpenAIResponseMcpApprovalRequest, + InputOpenAIResponseInputFunctionToolCallOutput, + InputOpenAIResponseMcpApprovalResponse, InputOpenAIResponseMessage, ] diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/llama_stack_client/types/responses/input_item_list_response.py index 3cb26346..b812ee62 100644 --- a/src/llama_stack_client/types/responses/input_item_list_response.py +++ b/src/llama_stack_client/types/responses/input_item_list_response.py @@ -15,16 +15,6 @@ __all__ = [ "InputItemListResponse", "Data", - "DataOpenAIResponseOutputMessageWebSearchToolCall", - "DataOpenAIResponseOutputMessageFileSearchToolCall", - "DataOpenAIResponseOutputMessageFileSearchToolCallResult", - "DataOpenAIResponseOutputMessageFunctionToolCall", - "DataOpenAIResponseInputFunctionToolCallOutput", - "DataOpenAIResponseMcpApprovalRequest", - "DataOpenAIResponseMcpApprovalResponse", - "DataOpenAIResponseOutputMessageMcpCall", - "DataOpenAIResponseOutputMessageMcpListTools", - "DataOpenAIResponseOutputMessageMcpListToolsTool", "DataOpenAIResponseMessage", "DataOpenAIResponseMessageContentUnionMember1", "DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText", @@ -37,158 +27,19 @@ "DataOpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation", "DataOpenAIResponseMessageContentUnionMember2OpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath", "DataOpenAIResponseMessageContentUnionMember2OpenAIResponseContentPartRefusal", + "DataOpenAIResponseOutputMessageWebSearchToolCall", + "DataOpenAIResponseOutputMessageFileSearchToolCall", + "DataOpenAIResponseOutputMessageFileSearchToolCallResult", + "DataOpenAIResponseOutputMessageFunctionToolCall", + "DataOpenAIResponseOutputMessageMcpCall", + "DataOpenAIResponseOutputMessageMcpListTools", + "DataOpenAIResponseOutputMessageMcpListToolsTool", + "DataOpenAIResponseMcpApprovalRequest", + "DataOpenAIResponseInputFunctionToolCallOutput", + "DataOpenAIResponseMcpApprovalResponse", ] -class 
DataOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): - id: str - """Unique identifier for this tool call""" - - status: str - """Current status of the web search operation""" - - type: Literal["web_search_call"] - """Tool call type identifier, always "web_search_call" """ - - -class DataOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): - attributes: Dict[str, Union[bool, float, str, List[object], object, None]] - """(Optional) Key-value attributes associated with the file""" - - file_id: str - """Unique identifier of the file containing the result""" - - filename: str - """Name of the file containing the result""" - - score: float - """Relevance score for this search result (between 0 and 1)""" - - text: str - """Text content of the search result""" - - -class DataOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): - id: str - """Unique identifier for this tool call""" - - queries: List[str] - """List of search queries executed""" - - status: str - """Current status of the file search operation""" - - type: Literal["file_search_call"] - """Tool call type identifier, always "file_search_call" """ - - results: Optional[List[DataOpenAIResponseOutputMessageFileSearchToolCallResult]] = None - """(Optional) Search results returned by the file search operation""" - - -class DataOpenAIResponseOutputMessageFunctionToolCall(BaseModel): - arguments: str - """JSON string containing the function arguments""" - - call_id: str - """Unique identifier for the function call""" - - name: str - """Name of the function being called""" - - type: Literal["function_call"] - """Tool call type identifier, always "function_call" """ - - id: Optional[str] = None - """(Optional) Additional identifier for the tool call""" - - status: Optional[str] = None - """(Optional) Current status of the function call execution""" - - -class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel): - call_id: str - - output: str - - type: Literal["function_call_output"] - - id: Optional[str] = None - - status: Optional[str] = None - - -class DataOpenAIResponseMcpApprovalRequest(BaseModel): - id: str - - arguments: str - - name: str - - server_label: str - - type: Literal["mcp_approval_request"] - - -class DataOpenAIResponseMcpApprovalResponse(BaseModel): - approval_request_id: str - - approve: bool - - type: Literal["mcp_approval_response"] - - id: Optional[str] = None - - reason: Optional[str] = None - - -class DataOpenAIResponseOutputMessageMcpCall(BaseModel): - id: str - """Unique identifier for this MCP call""" - - arguments: str - """JSON string containing the MCP call arguments""" - - name: str - """Name of the MCP method being called""" - - server_label: str - """Label identifying the MCP server handling the call""" - - type: Literal["mcp_call"] - """Tool call type identifier, always "mcp_call" """ - - error: Optional[str] = None - """(Optional) Error message if the MCP call failed""" - - output: Optional[str] = None - """(Optional) Output result from the successful MCP call""" - - -class DataOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): - input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] - """JSON schema defining the tool's input parameters""" - - name: str - """Name of the tool""" - - description: Optional[str] = None - """(Optional) Description of what the tool does""" - - -class DataOpenAIResponseOutputMessageMcpListTools(BaseModel): - id: str - """Unique identifier for this MCP list tools operation""" - - server_label: str - """Label identifying the MCP 
server providing the tools""" - - tools: List[DataOpenAIResponseOutputMessageMcpListToolsTool] - """List of available tools provided by the MCP server""" - - type: Literal["mcp_list_tools"] - """Tool call type identifier, always "mcp_list_tools" """ - - class DataOpenAIResponseMessageContentUnionMember1OpenAIResponseInputMessageContentText(BaseModel): text: str """The text content of the input message""" @@ -330,15 +181,165 @@ class DataOpenAIResponseMessage(BaseModel): status: Optional[str] = None +class DataOpenAIResponseOutputMessageWebSearchToolCall(BaseModel): + id: str + """Unique identifier for this tool call""" + + status: str + """Current status of the web search operation""" + + type: Literal["web_search_call"] + """Tool call type identifier, always "web_search_call" """ + + +class DataOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel): + attributes: Dict[str, Union[bool, float, str, List[object], object, None]] + """(Optional) Key-value attributes associated with the file""" + + file_id: str + """Unique identifier of the file containing the result""" + + filename: str + """Name of the file containing the result""" + + score: float + """Relevance score for this search result (between 0 and 1)""" + + text: str + """Text content of the search result""" + + +class DataOpenAIResponseOutputMessageFileSearchToolCall(BaseModel): + id: str + """Unique identifier for this tool call""" + + queries: List[str] + """List of search queries executed""" + + status: str + """Current status of the file search operation""" + + type: Literal["file_search_call"] + """Tool call type identifier, always "file_search_call" """ + + results: Optional[List[DataOpenAIResponseOutputMessageFileSearchToolCallResult]] = None + """(Optional) Search results returned by the file search operation""" + + +class DataOpenAIResponseOutputMessageFunctionToolCall(BaseModel): + arguments: str + """JSON string containing the function arguments""" + + call_id: str + """Unique identifier for the function call""" + + name: str + """Name of the function being called""" + + type: Literal["function_call"] + """Tool call type identifier, always "function_call" """ + + id: Optional[str] = None + """(Optional) Additional identifier for the tool call""" + + status: Optional[str] = None + """(Optional) Current status of the function call execution""" + + +class DataOpenAIResponseOutputMessageMcpCall(BaseModel): + id: str + """Unique identifier for this MCP call""" + + arguments: str + """JSON string containing the MCP call arguments""" + + name: str + """Name of the MCP method being called""" + + server_label: str + """Label identifying the MCP server handling the call""" + + type: Literal["mcp_call"] + """Tool call type identifier, always "mcp_call" """ + + error: Optional[str] = None + """(Optional) Error message if the MCP call failed""" + + output: Optional[str] = None + """(Optional) Output result from the successful MCP call""" + + +class DataOpenAIResponseOutputMessageMcpListToolsTool(BaseModel): + input_schema: Dict[str, Union[bool, float, str, List[object], object, None]] + """JSON schema defining the tool's input parameters""" + + name: str + """Name of the tool""" + + description: Optional[str] = None + """(Optional) Description of what the tool does""" + + +class DataOpenAIResponseOutputMessageMcpListTools(BaseModel): + id: str + """Unique identifier for this MCP list tools operation""" + + server_label: str + """Label identifying the MCP server providing the tools""" + + tools: 
List[DataOpenAIResponseOutputMessageMcpListToolsTool] + """List of available tools provided by the MCP server""" + + type: Literal["mcp_list_tools"] + """Tool call type identifier, always "mcp_list_tools" """ + + +class DataOpenAIResponseMcpApprovalRequest(BaseModel): + id: str + + arguments: str + + name: str + + server_label: str + + type: Literal["mcp_approval_request"] + + +class DataOpenAIResponseInputFunctionToolCallOutput(BaseModel): + call_id: str + + output: str + + type: Literal["function_call_output"] + + id: Optional[str] = None + + status: Optional[str] = None + + +class DataOpenAIResponseMcpApprovalResponse(BaseModel): + approval_request_id: str + + approve: bool + + type: Literal["mcp_approval_response"] + + id: Optional[str] = None + + reason: Optional[str] = None + + Data: TypeAlias = Union[ + DataOpenAIResponseMessage, DataOpenAIResponseOutputMessageWebSearchToolCall, DataOpenAIResponseOutputMessageFileSearchToolCall, DataOpenAIResponseOutputMessageFunctionToolCall, - DataOpenAIResponseInputFunctionToolCallOutput, - DataOpenAIResponseMcpApprovalRequest, - DataOpenAIResponseMcpApprovalResponse, DataOpenAIResponseOutputMessageMcpCall, DataOpenAIResponseOutputMessageMcpListTools, + DataOpenAIResponseMcpApprovalRequest, + DataOpenAIResponseInputFunctionToolCallOutput, + DataOpenAIResponseMcpApprovalResponse, DataOpenAIResponseMessage, ] diff --git a/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py b/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py index 095a2a69..d65980c5 100644 --- a/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py +++ b/src/llama_stack_client/types/tool_runtime/rag_tool_insert_params.py @@ -23,5 +23,5 @@ class RagToolInsertParams(TypedDict, total=False): documents: Required[Iterable[Document]] """List of documents to index in the RAG system""" - vector_db_id: Required[str] + vector_store_id: Required[str] """ID of the vector database to store the document embeddings""" diff --git a/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py b/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py index 08d1f998..6e8fa8ce 100644 --- a/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py +++ b/src/llama_stack_client/types/tool_runtime/rag_tool_query_params.py @@ -21,7 +21,7 @@ class RagToolQueryParams(TypedDict, total=False): content: Required[InterleavedContent] """The query content to search for in the indexed documents""" - vector_db_ids: Required[SequenceNotStr[str]] + vector_store_ids: Required[SequenceNotStr[str]] """List of vector database IDs to search within""" query_config: QueryConfig diff --git a/src/llama_stack_client/types/vector_io_insert_params.py b/src/llama_stack_client/types/vector_io_insert_params.py index 5613251f..1584f807 100644 --- a/src/llama_stack_client/types/vector_io_insert_params.py +++ b/src/llama_stack_client/types/vector_io_insert_params.py @@ -27,7 +27,7 @@ class VectorIoInsertParams(TypedDict, total=False): later. 
""" - vector_db_id: Required[str] + vector_store_id: Required[str] """The identifier of the vector database to insert the chunks into.""" ttl_seconds: int diff --git a/src/llama_stack_client/types/vector_io_query_params.py b/src/llama_stack_client/types/vector_io_query_params.py index a2fdc561..538604ac 100644 --- a/src/llama_stack_client/types/vector_io_query_params.py +++ b/src/llama_stack_client/types/vector_io_query_params.py @@ -20,7 +20,7 @@ class VectorIoQueryParams(TypedDict, total=False): query: Required[InterleavedContent] """The query to search for.""" - vector_db_id: Required[str] + vector_store_id: Required[str] """The identifier of the vector database to query.""" params: Dict[str, Union[bool, float, str, Iterable[object], object, None]] diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py index 5d022f3d..48b2ece3 100644 --- a/tests/api_resources/test_moderations.py +++ b/tests/api_resources/test_moderations.py @@ -25,6 +25,13 @@ class TestModerations: @parametrize def test_method_create(self, client: LlamaStackClient) -> None: + moderation = client.moderations.create( + input="string", + ) + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: LlamaStackClient) -> None: moderation = client.moderations.create( input="string", model="model", @@ -35,7 +42,6 @@ def test_method_create(self, client: LlamaStackClient) -> None: def test_raw_response_create(self, client: LlamaStackClient) -> None: response = client.moderations.with_raw_response.create( input="string", - model="model", ) assert response.is_closed is True @@ -47,7 +53,6 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None: def test_streaming_response_create(self, client: LlamaStackClient) -> None: with client.moderations.with_streaming_response.create( input="string", - model="model", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -65,6 +70,13 @@ class TestAsyncModerations: @parametrize async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: + moderation = await async_client.moderations.create( + input="string", + ) + assert_matches_type(CreateResponse, moderation, path=["response"]) + + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: moderation = await async_client.moderations.create( input="string", model="model", @@ -75,7 +87,6 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None: async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.moderations.with_raw_response.create( input="string", - model="model", ) assert response.is_closed is True @@ -87,7 +98,6 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.moderations.with_streaming_response.create( input="string", - model="model", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_vector_io.py b/tests/api_resources/test_vector_io.py index 2e37edcf..9adf721a 100644 --- a/tests/api_resources/test_vector_io.py +++ b/tests/api_resources/test_vector_io.py @@ -32,7 +32,7 @@ def test_method_insert(self, client: LlamaStackClient) 
-> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert vector_io is None @@ -60,7 +60,7 @@ def test_method_insert_with_all_params(self, client: LlamaStackClient) -> None: "stored_chunk_id": "stored_chunk_id", } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ttl_seconds=0, ) assert vector_io is None @@ -74,7 +74,7 @@ def test_raw_response_insert(self, client: LlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -91,7 +91,7 @@ def test_streaming_response_insert(self, client: LlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -105,7 +105,7 @@ def test_streaming_response_insert(self, client: LlamaStackClient) -> None: def test_method_query(self, client: LlamaStackClient) -> None: vector_io = client.vector_io.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert_matches_type(QueryChunksResponse, vector_io, path=["response"]) @@ -113,7 +113,7 @@ def test_method_query(self, client: LlamaStackClient) -> None: def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: vector_io = client.vector_io.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", params={"foo": True}, ) assert_matches_type(QueryChunksResponse, vector_io, path=["response"]) @@ -122,7 +122,7 @@ def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: def test_raw_response_query(self, client: LlamaStackClient) -> None: response = client.vector_io.with_raw_response.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -134,7 +134,7 @@ def test_raw_response_query(self, client: LlamaStackClient) -> None: def test_streaming_response_query(self, client: LlamaStackClient) -> None: with client.vector_io.with_streaming_response.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -159,7 +159,7 @@ async def test_method_insert(self, async_client: AsyncLlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert vector_io is None @@ -187,7 +187,7 @@ async def test_method_insert_with_all_params(self, async_client: AsyncLlamaStack "stored_chunk_id": "stored_chunk_id", } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ttl_seconds=0, ) assert vector_io is None @@ -201,7 +201,7 @@ async def test_raw_response_insert(self, async_client: AsyncLlamaStackClient) -> "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -218,7 +218,7 @@ async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClie "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -232,7 +232,7 @@ async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClie 
async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None: vector_io = await async_client.vector_io.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert_matches_type(QueryChunksResponse, vector_io, path=["response"]) @@ -240,7 +240,7 @@ async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None: async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: vector_io = await async_client.vector_io.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", params={"foo": True}, ) assert_matches_type(QueryChunksResponse, vector_io, path=["response"]) @@ -249,7 +249,7 @@ async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackC async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.vector_io.with_raw_response.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -261,7 +261,7 @@ async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> async def test_streaming_response_query(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.vector_io.with_streaming_response.query( query="string", - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/tool_runtime/test_rag_tool.py b/tests/api_resources/tool_runtime/test_rag_tool.py index 8dd7e752..fd743058 100644 --- a/tests/api_resources/tool_runtime/test_rag_tool.py +++ b/tests/api_resources/tool_runtime/test_rag_tool.py @@ -34,7 +34,7 @@ def test_method_insert(self, client: LlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert rag_tool is None @@ -49,7 +49,7 @@ def test_raw_response_insert(self, client: LlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -68,7 +68,7 @@ def test_streaming_response_insert(self, client: LlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -82,7 +82,7 @@ def test_streaming_response_insert(self, client: LlamaStackClient) -> None: def test_method_query(self, client: LlamaStackClient) -> None: rag_tool = client.tool_runtime.rag_tool.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) assert_matches_type(QueryResult, rag_tool, path=["response"]) @@ -90,7 +90,7 @@ def test_method_query(self, client: LlamaStackClient) -> None: def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: rag_tool = client.tool_runtime.rag_tool.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], query_config={ "chunk_template": "chunk_template", "max_chunks": 0, @@ -112,7 +112,7 @@ def test_method_query_with_all_params(self, client: LlamaStackClient) -> None: def test_raw_response_query(self, client: LlamaStackClient) -> None: response = client.tool_runtime.rag_tool.with_raw_response.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) 
assert response.is_closed is True @@ -124,7 +124,7 @@ def test_raw_response_query(self, client: LlamaStackClient) -> None: def test_streaming_response_query(self, client: LlamaStackClient) -> None: with client.tool_runtime.rag_tool.with_streaming_response.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -151,7 +151,7 @@ async def test_method_insert(self, async_client: AsyncLlamaStackClient) -> None: "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert rag_tool is None @@ -166,7 +166,7 @@ async def test_raw_response_insert(self, async_client: AsyncLlamaStackClient) -> "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) assert response.is_closed is True @@ -185,7 +185,7 @@ async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClie "metadata": {"foo": True}, } ], - vector_db_id="vector_db_id", + vector_store_id="vector_store_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -199,7 +199,7 @@ async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClie async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None: rag_tool = await async_client.tool_runtime.rag_tool.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) assert_matches_type(QueryResult, rag_tool, path=["response"]) @@ -207,7 +207,7 @@ async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None: async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackClient) -> None: rag_tool = await async_client.tool_runtime.rag_tool.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], query_config={ "chunk_template": "chunk_template", "max_chunks": 0, @@ -229,7 +229,7 @@ async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackC async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> None: response = await async_client.tool_runtime.rag_tool.with_raw_response.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) assert response.is_closed is True @@ -241,7 +241,7 @@ async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> async def test_streaming_response_query(self, async_client: AsyncLlamaStackClient) -> None: async with async_client.tool_runtime.rag_tool.with_streaming_response.query( content="string", - vector_db_ids=["string"], + vector_store_ids=["string"], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python"
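---

A short migration note for downstream callers of this SDK. The sketch below shows the renamed keyword arguments in use, mirroring the call shapes exercised by the tests in this patch. It is a minimal example, not part of the generated code: the base URL and port, the `"my-store"` vector store ID, and the input strings are hypothetical placeholders, and it assumes a Llama Stack server is already running with a registered vector store.

```python
# Hypothetical migration sketch for the vector_db_id -> vector_store_id rename.
# Placeholders (not from this patch): the base URL, "my-store", and all input text.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# moderations.create(): `model` is now optional and may be omitted,
# deferring model selection to the server's configuration.
moderation = client.moderations.create(input="Is this text safe?")

# vector_io.insert()/query(): pass `vector_store_id` (previously `vector_db_id`).
client.vector_io.insert(
    chunks=[{"content": "Llama Stack is a framework...", "metadata": {"source": "docs"}}],
    vector_store_id="my-store",  # before this patch: vector_db_id="my-store"
)
chunks = client.vector_io.query(
    query="What is Llama Stack?",
    vector_store_id="my-store",
)

# tool_runtime.rag_tool.query(): pass `vector_store_ids` (previously `vector_db_ids`).
result = client.tool_runtime.rag_tool.query(
    content="What is Llama Stack?",
    vector_store_ids=["my-store"],
)
```

Only the keyword names change; request bodies and return types are otherwise untouched, so for most callers the upgrade should amount to renaming `vector_db_id`/`vector_db_ids` at each call site and optionally dropping the now-optional `model` argument to `moderations.create()`.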