diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index fe81c38e..51eda4ba 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.3.0-alpha.2"
+ ".": "0.3.0-alpha.3"
}
diff --git a/.stats.yml b/.stats.yml
index 448f9057..436151e8 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 109
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-4337a6181c2db17737133e944b4b660a5e00ea10dce6be3252918e39451e9b5f.yml
openapi_spec_hash: a0bc8f4b5f45bc5741fed8eaa61171c3
-config_hash: d8706905bf16d9e4141e88d5a778263b
+config_hash: 0412cd40c0609550c1a47c69dd104e4f
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57ceb8eb..05e36acf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog

+## 0.3.0-alpha.3 (2025-09-30)
+
+Full Changelog: [v0.3.0-alpha.2...v0.3.0-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.2...v0.3.0-alpha.3)
+
+### Features
+
+* **api:** fix file batches SDK to list_files ([9808a5b](https://github.com/llamastack/llama-stack-client-python/commit/9808a5bc904a7e727bd003f85ec520047e6ac033))
+
## 0.3.0-alpha.2 (2025-09-30)

Full Changelog: [v0.3.0-alpha.1...v0.3.0-alpha.2](https://github.com/llamastack/llama-stack-client-python/compare/v0.3.0-alpha.1...v0.3.0-alpha.2)
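Reviewer note on the one change this release ships: `file_batches.list` is renamed to `file_batches.list_files`, with the same path params and return type. A minimal migration sketch, assuming a client configured from the environment and placeholder IDs:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient()  # assumes base URL / API key come from the environment

# Before 0.3.0-alpha.3 (removed in this diff):
#   client.vector_stores.file_batches.list("batch_123", vector_store_id="vs_123")

# From 0.3.0-alpha.3 on:
page = client.vector_stores.file_batches.list_files(
    "batch_123",               # batch_id (placeholder)
    vector_store_id="vs_123",  # placeholder
)
```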
diff --git a/api.md b/api.md
index 15e91db6..7efc4fae 100644
--- a/api.md
+++ b/api.md
@@ -279,8 +279,8 @@ Methods:
- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatches
- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
-- client.vector_stores.file_batches.list(batch_id, \*, vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]
- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
+- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]

# Models
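Per the api.md signature above, the renamed method still returns `SyncOpenAICursorPage[VectorStoreFile]`, so cursor iteration is unchanged. A sketch with illustrative paging values (the parameter names match the test suite further down; the `order` value is an assumed OpenAI-style choice, as the tests only pass a placeholder string):

```python
# Auto-paginating iteration over the files in a batch; subsequent pages are
# fetched lazily as the loop advances past the current page.
for file in client.vector_stores.file_batches.list_files(
    "batch_123",
    vector_store_id="vs_123",
    limit=20,
    order="desc",
):
    print(file.id)
```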
diff --git a/pyproject.toml b/pyproject.toml
index ba98bc1a..e0d567b3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.3.0-alpha.2"
+version = "0.3.0-alpha.3"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/llama_stack_client/resources/vector_stores/file_batches.py b/src/llama_stack_client/resources/vector_stores/file_batches.py
index 654fdd79..bdeb14b6 100644
--- a/src/llama_stack_client/resources/vector_stores/file_batches.py
+++ b/src/llama_stack_client/resources/vector_stores/file_batches.py
@@ -18,7 +18,7 @@
)
from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.vector_stores import file_batch_list_params, file_batch_create_params
+from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params
from ...types.vector_stores.vector_store_file import VectorStoreFile
from ...types.vector_stores.vector_store_file_batches import VectorStoreFileBatches
@@ -131,7 +131,43 @@ def retrieve(
cast_to=VectorStoreFileBatches,
)
- def list(
+ def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VectorStoreFileBatches:
+ """
+ Cancels a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ return self._post(
+ f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatches,
+ )
+
+ def list_files(
self,
batch_id: str,
*,
@@ -194,48 +230,12 @@ def list(
"limit": limit,
"order": order,
},
- file_batch_list_params.FileBatchListParams,
+ file_batch_list_files_params.FileBatchListFilesParams,
),
),
model=VectorStoreFile,
)
- def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> VectorStoreFileBatches:
- """
- Cancels a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatches,
- )
-
class AsyncFileBatchesResource(AsyncAPIResource):
@cached_property
@@ -343,7 +343,43 @@ async def retrieve(
cast_to=VectorStoreFileBatches,
)
- def list(
+ async def cancel(
+ self,
+ batch_id: str,
+ *,
+ vector_store_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> VectorStoreFileBatches:
+ """
+ Cancels a vector store file batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not vector_store_id:
+ raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ return await self._post(
+ f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=VectorStoreFileBatches,
+ )
+
+ def list_files(
self,
batch_id: str,
*,
@@ -406,48 +442,12 @@ def list(
"limit": limit,
"order": order,
},
- file_batch_list_params.FileBatchListParams,
+ file_batch_list_files_params.FileBatchListFilesParams,
),
),
model=VectorStoreFile,
)
- async def cancel(
- self,
- batch_id: str,
- *,
- vector_store_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> VectorStoreFileBatches:
- """
- Cancels a vector store file batch.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not vector_store_id:
- raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
- if not batch_id:
- raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
- return await self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=VectorStoreFileBatches,
- )
-
class FileBatchesResourceWithRawResponse:
def __init__(self, file_batches: FileBatchesResource) -> None:
@@ -459,12 +459,12 @@ def __init__(self, file_batches: FileBatchesResource) -> None:
self.retrieve = to_raw_response_wrapper(
file_batches.retrieve,
)
- self.list = to_raw_response_wrapper(
- file_batches.list,
- )
self.cancel = to_raw_response_wrapper(
file_batches.cancel,
)
+ self.list_files = to_raw_response_wrapper(
+ file_batches.list_files,
+ )
class AsyncFileBatchesResourceWithRawResponse:
@@ -477,12 +477,12 @@ def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
self.retrieve = async_to_raw_response_wrapper(
file_batches.retrieve,
)
- self.list = async_to_raw_response_wrapper(
- file_batches.list,
- )
self.cancel = async_to_raw_response_wrapper(
file_batches.cancel,
)
+ self.list_files = async_to_raw_response_wrapper(
+ file_batches.list_files,
+ )
class FileBatchesResourceWithStreamingResponse:
@@ -495,12 +495,12 @@ def __init__(self, file_batches: FileBatchesResource) -> None:
self.retrieve = to_streamed_response_wrapper(
file_batches.retrieve,
)
- self.list = to_streamed_response_wrapper(
- file_batches.list,
- )
self.cancel = to_streamed_response_wrapper(
file_batches.cancel,
)
+ self.list_files = to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
class AsyncFileBatchesResourceWithStreamingResponse:
@@ -513,9 +513,9 @@ def __init__(self, file_batches: AsyncFileBatchesResource) -> None:
self.retrieve = async_to_streamed_response_wrapper(
file_batches.retrieve,
)
- self.list = async_to_streamed_response_wrapper(
- file_batches.list,
- )
self.cancel = async_to_streamed_response_wrapper(
file_batches.cancel,
)
+ self.list_files = async_to_streamed_response_wrapper(
+ file_batches.list_files,
+ )
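Note the sync/async split this move preserves: in the async resource, `cancel` is a coroutine (`async def`) that must be awaited, while `list_files` stays a plain `def` returning an `AsyncPaginator` that is iterated with `async for`. A usage sketch under those assumptions, with placeholder IDs:

```python
import asyncio

from llama_stack_client import AsyncLlamaStackClient


async def main() -> None:
    client = AsyncLlamaStackClient()  # assumes env-provided configuration

    # Coroutine: awaited, returns VectorStoreFileBatches.
    batch = await client.vector_stores.file_batches.cancel(
        "batch_123", vector_store_id="vs_123"
    )
    print(batch)

    # AsyncPaginator: iterate directly; pages are fetched lazily.
    async for file in client.vector_stores.file_batches.list_files(
        "batch_123", vector_store_id="vs_123"
    ):
        print(file.id)


asyncio.run(main())
```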
diff --git a/src/llama_stack_client/types/vector_stores/__init__.py b/src/llama_stack_client/types/vector_stores/__init__.py
index 677030d9..ee2835d8 100644
--- a/src/llama_stack_client/types/vector_stores/__init__.py
+++ b/src/llama_stack_client/types/vector_stores/__init__.py
@@ -8,9 +8,9 @@
from .file_update_params import FileUpdateParams as FileUpdateParams
from .file_delete_response import FileDeleteResponse as FileDeleteResponse
from .file_content_response import FileContentResponse as FileContentResponse
-from .file_batch_list_params import FileBatchListParams as FileBatchListParams
from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams
from .vector_store_file_batches import VectorStoreFileBatches as VectorStoreFileBatches
+from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams
from .list_vector_store_files_in_batch_response import (
ListVectorStoreFilesInBatchResponse as ListVectorStoreFilesInBatchResponse,
)
diff --git a/src/llama_stack_client/types/vector_stores/file_batch_list_params.py b/src/llama_stack_client/types/vector_stores/file_batch_list_files_params.py
similarity index 90%
rename from src/llama_stack_client/types/vector_stores/file_batch_list_params.py
rename to src/llama_stack_client/types/vector_stores/file_batch_list_files_params.py
index 79e67eb1..2ffa8417 100644
--- a/src/llama_stack_client/types/vector_stores/file_batch_list_params.py
+++ b/src/llama_stack_client/types/vector_stores/file_batch_list_files_params.py
@@ -4,10 +4,10 @@
from typing_extensions import Required, TypedDict
-__all__ = ["FileBatchListParams"]
+__all__ = ["FileBatchListFilesParams"]
-class FileBatchListParams(TypedDict, total=False):
+class FileBatchListFilesParams(TypedDict, total=False):
vector_store_id: Required[str]
after: str
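For callers who build request params as dicts, the renamed TypedDict is re-exported from `llama_stack_client.types.vector_stores` (see the `__init__.py` hunk above). An illustrative construction with placeholder values; `total=False` makes every key except `vector_store_id` optional:

```python
from llama_stack_client.types.vector_stores import FileBatchListFilesParams

params: FileBatchListFilesParams = {
    "vector_store_id": "vs_123",  # Required[str]
    "after": "file_abc",          # optional cursor (placeholder)
}
```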
diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py
index b92f31b2..9cc63496 100644
--- a/tests/api_resources/vector_stores/test_file_batches.py
+++ b/tests/api_resources/vector_stores/test_file_batches.py
@@ -122,29 +122,16 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- file_batch = client.vector_stores.file_batches.list(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
-
- @parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
- file_batch = client.vector_stores.file_batches.list(
+ def test_method_cancel(self, client: LlamaStackClient) -> None:
+ file_batch = client.vector_stores.file_batches.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="filter",
- limit=0,
- order="order",
)
- assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.vector_stores.file_batches.with_raw_response.list(
+ def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
+ response = client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
@@ -152,11 +139,11 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
- assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.vector_stores.file_batches.with_streaming_response.list(
+ def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
+ with client.vector_stores.file_batches.with_streaming_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
@@ -164,35 +151,48 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
- assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list(self, client: LlamaStackClient) -> None:
+ def test_path_params_cancel(self, client: LlamaStackClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list(
+ client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.list(
+ client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="",
vector_store_id="vector_store_id",
)
@parametrize
- def test_method_cancel(self, client: LlamaStackClient) -> None:
- file_batch = client.vector_stores.file_batches.cancel(
+ def test_method_list_files(self, client: LlamaStackClient) -> None:
+ file_batch = client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
- response = client.vector_stores.file_batches.with_raw_response.cancel(
+ def test_method_list_files_with_all_params(self, client: LlamaStackClient) -> None:
+ file_batch = client.vector_stores.file_batches.list_files(
+ batch_id="batch_id",
+ vector_store_id="vector_store_id",
+ after="after",
+ before="before",
+ filter="filter",
+ limit=0,
+ order="order",
+ )
+ assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_list_files(self, client: LlamaStackClient) -> None:
+ response = client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
@@ -200,11 +200,11 @@ def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
- with client.vector_stores.file_batches.with_streaming_response.cancel(
+ def test_streaming_response_list_files(self, client: LlamaStackClient) -> None:
+ with client.vector_stores.file_batches.with_streaming_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
@@ -212,20 +212,20 @@ def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_cancel(self, client: LlamaStackClient) -> None:
+ def test_path_params_list_files(self, client: LlamaStackClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
+ client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- client.vector_stores.file_batches.with_raw_response.cancel(
+ client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="",
vector_store_id="vector_store_id",
)
@@ -337,29 +337,16 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- file_batch = await async_client.vector_stores.file_batches.list(
- batch_id="batch_id",
- vector_store_id="vector_store_id",
- )
- assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
-
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- file_batch = await async_client.vector_stores.file_batches.list(
+ async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ file_batch = await async_client.vector_stores.file_batches.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
- after="after",
- before="before",
- filter="filter",
- limit=0,
- order="order",
)
- assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.list(
+ async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
@@ -367,11 +354,11 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
- assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.list(
+ async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
@@ -379,35 +366,48 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
- assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+ assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list(
+ await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.list(
+ await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="",
vector_store_id="vector_store_id",
)
@parametrize
- async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- file_batch = await async_client.vector_stores.file_batches.cancel(
+ async def test_method_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ file_batch = await async_client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
+ async def test_method_list_files_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ file_batch = await async_client.vector_stores.file_batches.list_files(
+ batch_id="batch_id",
+ vector_store_id="vector_store_id",
+ after="after",
+ before="before",
+ filter="filter",
+ limit=0,
+ order="order",
+ )
+ assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
@@ -415,11 +415,11 @@ async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) ->
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
+ async def test_streaming_response_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
@@ -427,20 +427,20 @@ async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClie
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
- assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
+ assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list_files(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
+ await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
- await async_client.vector_stores.file_batches.with_raw_response.cancel(
+ await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="",
vector_store_id="vector_store_id",
)
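As the renamed tests assert, client-side path-param validation carries over: `list_files` raises `ValueError` on an empty `vector_store_id` or `batch_id` before any HTTP request is sent. A hypothetical standalone check (not part of this test file, and assuming an env-configured client):

```python
import pytest

from llama_stack_client import LlamaStackClient


def check_empty_batch_id_rejected() -> None:
    client = LlamaStackClient()  # assumes env-provided configuration
    # The guard runs eagerly in the method body, so no request is issued.
    with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id`"):
        client.vector_stores.file_batches.list_files(
            batch_id="",
            vector_store_id="vs_123",
        )
```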