Skip to content

Commit 4f8bf45

Browse files
feat: add support for /responses background parameter
1 parent c0bea05 commit 4f8bf45

File tree

6 files changed

+44
-2
lines changed

6 files changed

+44
-2
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
 configured_endpoints: 108
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-bffd917aa2197580fd7c0a210643e586c4a9658086c7f6559657ea35bd07f855.yml
-openapi_spec_hash: cd0e3133ee5fe5b0d6f4fa071cd44cc8
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-373eb8eb3cc02e6f8a9fa33079a5e735886fbf62958ee83e3cdef7bb4c41be37.yml
+openapi_spec_hash: fe1fa50161da4f095d128b0de7787e96
 config_hash: 6aa61d4143c3e3df785972c0287d1370

src/llama_stack_client/resources/responses/responses.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@ def create(
             ],
         ],
         model: str,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -128,6 +129,9 @@ def create(
 
           model: The underlying LLM used for completions.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -197,6 +201,7 @@ def create(
         ],
         model: str,
         stream: Literal[True],
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -248,6 +253,9 @@ def create(
 
           stream: Whether to stream the response.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -315,6 +323,7 @@ def create(
         ],
         model: str,
         stream: bool,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -366,6 +375,9 @@ def create(
 
           stream: Whether to stream the response.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -432,6 +444,7 @@ def create(
             ],
         ],
         model: str,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -480,6 +493,7 @@ def create(
             {
                 "input": input,
                 "model": model,
+                "background": background,
                 "conversation": conversation,
                 "guardrails": guardrails,
                 "include": include,
@@ -676,6 +690,7 @@ async def create(
             ],
         ],
         model: str,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -726,6 +741,9 @@ async def create(
 
           model: The underlying LLM used for completions.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -795,6 +813,7 @@ async def create(
         ],
         model: str,
         stream: Literal[True],
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -846,6 +865,9 @@ async def create(
 
           stream: Whether to stream the response.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -913,6 +935,7 @@ async def create(
         ],
         model: str,
         stream: bool,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -964,6 +987,9 @@ async def create(
 
           stream: Whether to stream the response.
 
+          background: Whether to run the model response in the background. When true, returns
+              immediately with status 'queued'.
+
           conversation: Optional ID of a conversation to add the response to.
 
           guardrails: List of guardrails to apply during response generation.
@@ -1030,6 +1056,7 @@ async def create(
             ],
         ],
         model: str,
+        background: bool | Omit = omit,
         conversation: Optional[str] | Omit = omit,
         guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
         include: Optional[
@@ -1078,6 +1105,7 @@ async def create(
             {
                 "input": input,
                 "model": model,
+                "background": background,
                 "conversation": conversation,
                 "guardrails": guardrails,
                 "include": include,

src/llama_stack_client/types/response_create_params.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,12 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     model: Required[str]
     """The underlying LLM used for completions."""
 
+    background: bool
+    """Whether to run the model response in the background.
+
+    When true, returns immediately with status 'queued'.
+    """
+
     conversation: Optional[str]
     """Optional ID of a conversation to add the response to."""
 

src/llama_stack_client/types/response_list_response.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1151,6 +1151,8 @@ class ResponseListResponse(BaseModel):
 
     store: bool
 
+    background: Optional[bool] = None
+
     completed_at: Optional[int] = None
 
     error: Optional[Error] = None

src/llama_stack_client/types/response_object.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -755,6 +755,8 @@ def output_text(self) -> str:
 
     store: bool
 
+    background: Optional[bool] = None
+
     completed_at: Optional[int] = None
 
     error: Optional[Error] = None

tests/api_resources/test_responses.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
         response = client.responses.create(
             input="string",
             model="model",
+            background=True,
             conversation="conversation",
             guardrails=["string"],
             include=["web_search_call.action.sources"],
@@ -129,6 +130,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
             input="string",
             model="model",
             stream=True,
+            background=True,
             conversation="conversation",
             guardrails=["string"],
             include=["web_search_call.action.sources"],
@@ -332,6 +334,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         response = await async_client.responses.create(
             input="string",
             model="model",
+            background=True,
             conversation="conversation",
             guardrails=["string"],
             include=["web_search_call.action.sources"],
@@ -420,6 +423,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             input="string",
             model="model",
             stream=True,
+            background=True,
             conversation="conversation",
             guardrails=["string"],
             include=["web_search_call.action.sources"],

0 commit comments

Comments
 (0)