6 changes: 6 additions & 0 deletions src/llama_stack_client/_base_client.py
@@ -960,6 +960,9 @@ def request(
         if self.custom_auth is not None:
             kwargs["auth"] = self.custom_auth

+        if options.follow_redirects is not None:
+            kwargs["follow_redirects"] = options.follow_redirects
+
         log.debug("Sending HTTP Request: %s %s", request.method, request.url)

         response = None
@@ -1460,6 +1463,9 @@ async def request(
         if self.custom_auth is not None:
             kwargs["auth"] = self.custom_auth

+        if options.follow_redirects is not None:
+            kwargs["follow_redirects"] = options.follow_redirects
+
         log.debug("Sending HTTP Request: %s %s", request.method, request.url)

         response = None
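Both hunks mirror the `custom_auth` pass-through directly above them: a per-request `follow_redirects` override is forwarded to httpx only when it was explicitly set, so `None` keeps the client's default redirect policy. A minimal standalone sketch of that pattern (illustrative only, not the SDK's actual `request` method; assumes httpx >= 0.20, where `Client.send()` accepts `follow_redirects`):

import httpx

follow_redirects = True  # stand-in for options.follow_redirects

kwargs = {}
if follow_redirects is not None:
    # Only an explicit True/False becomes a send() argument.
    kwargs["follow_redirects"] = follow_redirects

with httpx.Client() as http:
    request = http.build_request("GET", "https://example.com/")
    response = http.send(request, **kwargs)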
2 changes: 1 addition & 1 deletion src/llama_stack_client/_client.py
@@ -33,7 +33,6 @@
     datasets,
     inference,
     providers,
-    responses,
     telemetry,
     vector_io,
     benchmarks,
@@ -53,6 +52,7 @@
 from .resources.chat import chat
 from .resources.eval import eval
 from .resources.agents import agents
+from .resources.responses import responses
 from .resources.tool_runtime import tool_runtime
 from .resources.post_training import post_training
2 changes: 2 additions & 0 deletions src/llama_stack_client/_models.py
@@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
     idempotency_key: str
     json_data: Body
     extra_json: AnyMapping
+    follow_redirects: bool


 @final
@@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel):
     files: Union[HttpxRequestFiles, None] = None
     idempotency_key: Union[str, None] = None
     post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+    follow_redirects: Union[bool, None] = None

     # It should be noted that we cannot use `json` here as that would override
     # a BaseModel method in an incompatible fashion.
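The option is declared twice on purpose: as a plain `bool` on the input `TypedDict` that callers populate, and as a tri-state `Union[bool, None]` on the validated model, where `None` means "no per-request override". A small sketch of the tri-state resolution (the `Options` model and `resolve` helper are hypothetical, not SDK code):

from typing import Union

import pydantic


class Options(pydantic.BaseModel):
    follow_redirects: Union[bool, None] = None  # None -> inherit the client default


def resolve(options: Options, client_default: bool) -> bool:
    # An explicit True/False wins; None falls back to the client-level setting.
    if options.follow_redirects is None:
        return client_default
    return options.follow_redirects


assert resolve(Options(), client_default=False) is False
assert resolve(Options(follow_redirects=True), client_default=False) is True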
2 changes: 2 additions & 0 deletions src/llama_stack_client/_types.py
@@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False):
     params: Query
     extra_json: AnyMapping
     idempotency_key: str
+    follow_redirects: bool


 # Sentinel class used until PEP 0661 is accepted
@@ -215,3 +216,4 @@ class _GenericAlias(Protocol):

 class HttpxSendArgs(TypedDict, total=False):
     auth: httpx.Auth
+    follow_redirects: bool
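Because both TypedDicts use `total=False`, omitting `follow_redirects` is distinct from passing `False`: an absent key means "use the default", while an explicit `False` is still an override. A self-contained sketch (the `send_kwargs` variable name is illustrative):

import httpx
from typing_extensions import TypedDict


class HttpxSendArgs(TypedDict, total=False):
    auth: httpx.Auth
    follow_redirects: bool


send_kwargs: HttpxSendArgs = {}          # key omitted: httpx keeps its default
send_kwargs["follow_redirects"] = False  # key present: explicit per-request override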
33 changes: 33 additions & 0 deletions src/llama_stack_client/resources/responses/__init__.py
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .responses import (
    ResponsesResource,
    AsyncResponsesResource,
    ResponsesResourceWithRawResponse,
    AsyncResponsesResourceWithRawResponse,
    ResponsesResourceWithStreamingResponse,
    AsyncResponsesResourceWithStreamingResponse,
)
from .input_items import (
    InputItemsResource,
    AsyncInputItemsResource,
    InputItemsResourceWithRawResponse,
    AsyncInputItemsResourceWithRawResponse,
    InputItemsResourceWithStreamingResponse,
    AsyncInputItemsResourceWithStreamingResponse,
)

__all__ = [
    "InputItemsResource",
    "AsyncInputItemsResource",
    "InputItemsResourceWithRawResponse",
    "AsyncInputItemsResourceWithRawResponse",
    "InputItemsResourceWithStreamingResponse",
    "AsyncInputItemsResourceWithStreamingResponse",
    "ResponsesResource",
    "AsyncResponsesResource",
    "ResponsesResourceWithRawResponse",
    "AsyncResponsesResourceWithRawResponse",
    "ResponsesResourceWithStreamingResponse",
    "AsyncResponsesResourceWithStreamingResponse",
]
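The new subpackage re-exports every wrapper variant, so downstream code can import either resource family from one place. A quick sketch of what the module exposes (assuming an installed build that includes this PR):

from llama_stack_client.resources.responses import (
    InputItemsResource,
    ResponsesResource,
)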
226 changes: 226 additions & 0 deletions src/llama_stack_client/resources/responses/input_items.py
@@ -0,0 +1,226 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List
from typing_extensions import Literal

import httpx

from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
    to_raw_response_wrapper,
    to_streamed_response_wrapper,
    async_to_raw_response_wrapper,
    async_to_streamed_response_wrapper,
)
from ..._base_client import make_request_options
from ...types.responses import input_item_list_params
from ...types.responses.input_item_list_response import InputItemListResponse

__all__ = ["InputItemsResource", "AsyncInputItemsResource"]


class InputItemsResource(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> InputItemsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers
        """
        return InputItemsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> InputItemsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response
        """
        return InputItemsResourceWithStreamingResponse(self)

    def list(
        self,
        response_id: str,
        *,
        after: str | NotGiven = NOT_GIVEN,
        before: str | NotGiven = NOT_GIVEN,
        include: List[str] | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> InputItemListResponse:
        """
        List input items for a given OpenAI response.

        Args:
          after: An item ID to list items after, used for pagination.

          before: An item ID to list items before, used for pagination.

          include: Additional fields to include in the response.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: The order to return the input items in. Default is desc.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._get(
            f"/v1/openai/v1/responses/{response_id}/input_items",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    input_item_list_params.InputItemListParams,
                ),
            ),
            cast_to=InputItemListResponse,
        )


class AsyncInputItemsResource(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncInputItemsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/stainless-sdks/llama-stack-python#accessing-raw-response-data-eg-headers
        """
        return AsyncInputItemsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncInputItemsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response
        """
        return AsyncInputItemsResourceWithStreamingResponse(self)

    async def list(
        self,
        response_id: str,
        *,
        after: str | NotGiven = NOT_GIVEN,
        before: str | NotGiven = NOT_GIVEN,
        include: List[str] | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> InputItemListResponse:
        """
        List input items for a given OpenAI response.

        Args:
          after: An item ID to list items after, used for pagination.

          before: An item ID to list items before, used for pagination.

          include: Additional fields to include in the response.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: The order to return the input items in. Default is desc.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return await self._get(
            f"/v1/openai/v1/responses/{response_id}/input_items",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    input_item_list_params.InputItemListParams,
                ),
            ),
            cast_to=InputItemListResponse,
        )


class InputItemsResourceWithRawResponse:
    def __init__(self, input_items: InputItemsResource) -> None:
        self._input_items = input_items

        self.list = to_raw_response_wrapper(
            input_items.list,
        )


class AsyncInputItemsResourceWithRawResponse:
    def __init__(self, input_items: AsyncInputItemsResource) -> None:
        self._input_items = input_items

        self.list = async_to_raw_response_wrapper(
            input_items.list,
        )


class InputItemsResourceWithStreamingResponse:
    def __init__(self, input_items: InputItemsResource) -> None:
        self._input_items = input_items

        self.list = to_streamed_response_wrapper(
            input_items.list,
        )


class AsyncInputItemsResourceWithStreamingResponse:
    def __init__(self, input_items: AsyncInputItemsResource) -> None:
        self._input_items = input_items

        self.list = async_to_streamed_response_wrapper(
            input_items.list,
        )
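Combined with the `_client.py` wiring above, the new endpoint becomes reachable from the top-level client. A hedged usage sketch: the `client.responses.input_items` attribute path and the local `base_url` are assumptions based on the usual Stainless layout, and `resp_123` is a placeholder ID.

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Page through the input items of a previously created response.
items = client.responses.input_items.list(
    "resp_123",
    limit=20,
    order="desc",
)
print(items)

# The wrapper classes at the bottom of the file back the raw/streaming variants:
raw = client.responses.input_items.with_raw_response.list("resp_123")
print(raw.headers)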