Skip to content

Commit 3680c9f

Browse files
feat(api): several updates including Conversations, Responses changes, etc.
1 parent b8bfc9c commit 3680c9f

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

42 files changed

+4005
-102
lines changed

.stats.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
configured_endpoints: 108
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-b220f9f8667d2af8007134d0403b24452c20c9c512ca87d0b69b20b761272609.yml
3-
openapi_spec_hash: cde1096a830f2081d68f858f020fd53f
4-
config_hash: 8800bdff1a087b9d5211dda2a7b9f66f
1+
configured_endpoints: 115
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-8a12a05ba6892999ac506f69d5cbbc7218f28ee1a11bf8e0e548c603435bb643.yml
3+
openapi_spec_hash: 871ce212a98bdad4a44ec7fbf58d9fcb
4+
config_hash: 85d9db5422f2cf897267c0e4825ce1bf

api.md

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,39 @@ Methods:
102102

103103
- <code title="get /v1/responses/{response_id}/input_items">client.responses.input_items.<a href="./src/llama_stack_client/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/llama_stack_client/types/responses/input_item_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/responses/input_item_list_response.py">InputItemListResponse</a></code>
104104

105+
# Conversations
106+
107+
Types:
108+
109+
```python
110+
from llama_stack_client.types import ConversationObject, ConversationDeleteResponse
111+
```
112+
113+
Methods:
114+
115+
- <code title="post /v1/conversations">client.conversations.<a href="./src/llama_stack_client/resources/conversations/conversations.py">create</a>(\*\*<a href="src/llama_stack_client/types/conversation_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversation_object.py">ConversationObject</a></code>
116+
- <code title="get /v1/conversations/{conversation_id}">client.conversations.<a href="./src/llama_stack_client/resources/conversations/conversations.py">retrieve</a>(conversation_id) -> <a href="./src/llama_stack_client/types/conversation_object.py">ConversationObject</a></code>
117+
- <code title="post /v1/conversations/{conversation_id}">client.conversations.<a href="./src/llama_stack_client/resources/conversations/conversations.py">update</a>(conversation_id, \*\*<a href="src/llama_stack_client/types/conversation_update_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversation_object.py">ConversationObject</a></code>
118+
- <code title="delete /v1/conversations/{conversation_id}">client.conversations.<a href="./src/llama_stack_client/resources/conversations/conversations.py">delete</a>(conversation_id) -> <a href="./src/llama_stack_client/types/conversation_delete_response.py">ConversationDeleteResponse</a></code>
119+
120+
## Items
121+
122+
Types:
123+
124+
```python
125+
from llama_stack_client.types.conversations import (
126+
ItemCreateResponse,
127+
ItemListResponse,
128+
ItemGetResponse,
129+
)
130+
```
131+
132+
Methods:
133+
134+
- <code title="post /v1/conversations/{conversation_id}/items">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">create</a>(conversation_id, \*\*<a href="src/llama_stack_client/types/conversations/item_create_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversations/item_create_response.py">ItemCreateResponse</a></code>
135+
- <code title="get /v1/conversations/{conversation_id}/items">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">list</a>(conversation_id, \*\*<a href="src/llama_stack_client/types/conversations/item_list_params.py">params</a>) -> <a href="./src/llama_stack_client/types/conversations/item_list_response.py">ItemListResponse</a></code>
136+
- <code title="get /v1/conversations/{conversation_id}/items/{item_id}">client.conversations.items.<a href="./src/llama_stack_client/resources/conversations/items.py">get</a>(item_id, \*, conversation_id) -> <a href="./src/llama_stack_client/types/conversations/item_get_response.py">ItemGetResponse</a></code>
137+
105138
# Datasets
106139

107140
Types:

src/llama_stack_client/_client.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@
5555
completions,
5656
moderations,
5757
tool_runtime,
58+
conversations,
5859
vector_stores,
5960
scoring_functions,
6061
synthetic_data_generation,
@@ -86,6 +87,7 @@
8687
AsyncSyntheticDataGenerationResource,
8788
)
8889
from .resources.tool_runtime.tool_runtime import ToolRuntimeResource, AsyncToolRuntimeResource
90+
from .resources.conversations.conversations import ConversationsResource, AsyncConversationsResource
8991
from .resources.vector_stores.vector_stores import VectorStoresResource, AsyncVectorStoresResource
9092

9193
__all__ = [
@@ -181,6 +183,12 @@ def responses(self) -> ResponsesResource:
181183

182184
return ResponsesResource(self)
183185

186+
@cached_property
187+
def conversations(self) -> ConversationsResource:
188+
from .resources.conversations import ConversationsResource
189+
190+
return ConversationsResource(self)
191+
184192
@cached_property
185193
def datasets(self) -> DatasetsResource:
186194
from .resources.datasets import DatasetsResource
@@ -503,6 +511,12 @@ def responses(self) -> AsyncResponsesResource:
503511

504512
return AsyncResponsesResource(self)
505513

514+
@cached_property
515+
def conversations(self) -> AsyncConversationsResource:
516+
from .resources.conversations import AsyncConversationsResource
517+
518+
return AsyncConversationsResource(self)
519+
506520
@cached_property
507521
def datasets(self) -> AsyncDatasetsResource:
508522
from .resources.datasets import AsyncDatasetsResource
@@ -774,6 +788,12 @@ def responses(self) -> responses.ResponsesResourceWithRawResponse:
774788

775789
return ResponsesResourceWithRawResponse(self._client.responses)
776790

791+
@cached_property
792+
def conversations(self) -> conversations.ConversationsResourceWithRawResponse:
793+
from .resources.conversations import ConversationsResourceWithRawResponse
794+
795+
return ConversationsResourceWithRawResponse(self._client.conversations)
796+
777797
@cached_property
778798
def datasets(self) -> datasets.DatasetsResourceWithRawResponse:
779799
from .resources.datasets import DatasetsResourceWithRawResponse
@@ -931,6 +951,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
931951

932952
return AsyncResponsesResourceWithRawResponse(self._client.responses)
933953

954+
@cached_property
955+
def conversations(self) -> conversations.AsyncConversationsResourceWithRawResponse:
956+
from .resources.conversations import AsyncConversationsResourceWithRawResponse
957+
958+
return AsyncConversationsResourceWithRawResponse(self._client.conversations)
959+
934960
@cached_property
935961
def datasets(self) -> datasets.AsyncDatasetsResourceWithRawResponse:
936962
from .resources.datasets import AsyncDatasetsResourceWithRawResponse
@@ -1090,6 +1116,12 @@ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
10901116

10911117
return ResponsesResourceWithStreamingResponse(self._client.responses)
10921118

1119+
@cached_property
1120+
def conversations(self) -> conversations.ConversationsResourceWithStreamingResponse:
1121+
from .resources.conversations import ConversationsResourceWithStreamingResponse
1122+
1123+
return ConversationsResourceWithStreamingResponse(self._client.conversations)
1124+
10931125
@cached_property
10941126
def datasets(self) -> datasets.DatasetsResourceWithStreamingResponse:
10951127
from .resources.datasets import DatasetsResourceWithStreamingResponse
@@ -1249,6 +1281,12 @@ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
12491281

12501282
return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
12511283

1284+
@cached_property
1285+
def conversations(self) -> conversations.AsyncConversationsResourceWithStreamingResponse:
1286+
from .resources.conversations import AsyncConversationsResourceWithStreamingResponse
1287+
1288+
return AsyncConversationsResourceWithStreamingResponse(self._client.conversations)
1289+
12521290
@cached_property
12531291
def datasets(self) -> datasets.AsyncDatasetsResourceWithStreamingResponse:
12541292
from .resources.datasets import AsyncDatasetsResourceWithStreamingResponse

src/llama_stack_client/resources/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,14 @@
176176
ToolRuntimeResourceWithStreamingResponse,
177177
AsyncToolRuntimeResourceWithStreamingResponse,
178178
)
179+
from .conversations import (
180+
ConversationsResource,
181+
AsyncConversationsResource,
182+
ConversationsResourceWithRawResponse,
183+
AsyncConversationsResourceWithRawResponse,
184+
ConversationsResourceWithStreamingResponse,
185+
AsyncConversationsResourceWithStreamingResponse,
186+
)
179187
from .vector_stores import (
180188
VectorStoresResource,
181189
AsyncVectorStoresResource,
@@ -226,6 +234,12 @@
226234
"AsyncResponsesResourceWithRawResponse",
227235
"ResponsesResourceWithStreamingResponse",
228236
"AsyncResponsesResourceWithStreamingResponse",
237+
"ConversationsResource",
238+
"AsyncConversationsResource",
239+
"ConversationsResourceWithRawResponse",
240+
"AsyncConversationsResourceWithRawResponse",
241+
"ConversationsResourceWithStreamingResponse",
242+
"AsyncConversationsResourceWithStreamingResponse",
229243
"DatasetsResource",
230244
"AsyncDatasetsResource",
231245
"DatasetsResourceWithRawResponse",

src/llama_stack_client/resources/chat/completions.py

Lines changed: 30 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -83,9 +83,10 @@ def create(
8383
extra_body: Body | None = None,
8484
timeout: float | httpx.Timeout | None | NotGiven = not_given,
8585
) -> CompletionCreateResponse:
86-
"""
87-
Generate an OpenAI-compatible chat completion for the given messages using the
88-
specified model.
86+
"""Create chat completions.
87+
88+
Generate an OpenAI-compatible chat completion for the
89+
given messages using the specified model.
8990
9091
Args:
9192
messages: List of messages in the conversation.
@@ -179,9 +180,10 @@ def create(
179180
extra_body: Body | None = None,
180181
timeout: float | httpx.Timeout | None | NotGiven = not_given,
181182
) -> Stream[ChatCompletionChunk]:
182-
"""
183-
Generate an OpenAI-compatible chat completion for the given messages using the
184-
specified model.
183+
"""Create chat completions.
184+
185+
Generate an OpenAI-compatible chat completion for the
186+
given messages using the specified model.
185187
186188
Args:
187189
messages: List of messages in the conversation.
@@ -275,9 +277,10 @@ def create(
275277
extra_body: Body | None = None,
276278
timeout: float | httpx.Timeout | None | NotGiven = not_given,
277279
) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
278-
"""
279-
Generate an OpenAI-compatible chat completion for the given messages using the
280-
specified model.
280+
"""Create chat completions.
281+
282+
Generate an OpenAI-compatible chat completion for the
283+
given messages using the specified model.
281284
282285
Args:
283286
messages: List of messages in the conversation.
@@ -424,7 +427,8 @@ def retrieve(
424427
extra_body: Body | None = None,
425428
timeout: float | httpx.Timeout | None | NotGiven = not_given,
426429
) -> CompletionRetrieveResponse:
427-
"""
430+
"""Get chat completion.
431+
428432
Describe a chat completion by its ID.
429433
430434
Args:
@@ -461,7 +465,7 @@ def list(
461465
timeout: float | httpx.Timeout | None | NotGiven = not_given,
462466
) -> SyncOpenAICursorPage[CompletionListResponse]:
463467
"""
464-
List all chat completions.
468+
List chat completions.
465469
466470
Args:
467471
after: The ID of the last chat completion to return.
@@ -556,9 +560,10 @@ async def create(
556560
extra_body: Body | None = None,
557561
timeout: float | httpx.Timeout | None | NotGiven = not_given,
558562
) -> CompletionCreateResponse:
559-
"""
560-
Generate an OpenAI-compatible chat completion for the given messages using the
561-
specified model.
563+
"""Create chat completions.
564+
565+
Generate an OpenAI-compatible chat completion for the
566+
given messages using the specified model.
562567
563568
Args:
564569
messages: List of messages in the conversation.
@@ -652,9 +657,10 @@ async def create(
652657
extra_body: Body | None = None,
653658
timeout: float | httpx.Timeout | None | NotGiven = not_given,
654659
) -> AsyncStream[ChatCompletionChunk]:
655-
"""
656-
Generate an OpenAI-compatible chat completion for the given messages using the
657-
specified model.
660+
"""Create chat completions.
661+
662+
Generate an OpenAI-compatible chat completion for the
663+
given messages using the specified model.
658664
659665
Args:
660666
messages: List of messages in the conversation.
@@ -748,9 +754,10 @@ async def create(
748754
extra_body: Body | None = None,
749755
timeout: float | httpx.Timeout | None | NotGiven = not_given,
750756
) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
751-
"""
752-
Generate an OpenAI-compatible chat completion for the given messages using the
753-
specified model.
757+
"""Create chat completions.
758+
759+
Generate an OpenAI-compatible chat completion for the
760+
given messages using the specified model.
754761
755762
Args:
756763
messages: List of messages in the conversation.
@@ -897,7 +904,8 @@ async def retrieve(
897904
extra_body: Body | None = None,
898905
timeout: float | httpx.Timeout | None | NotGiven = not_given,
899906
) -> CompletionRetrieveResponse:
900-
"""
907+
"""Get chat completion.
908+
901909
Describe a chat completion by its ID.
902910
903911
Args:
@@ -934,7 +942,7 @@ def list(
934942
timeout: float | httpx.Timeout | None | NotGiven = not_given,
935943
) -> AsyncPaginator[CompletionListResponse, AsyncOpenAICursorPage[CompletionListResponse]]:
936944
"""
937-
List all chat completions.
945+
List chat completions.
938946
939947
Args:
940948
after: The ID of the last chat completion to return.

src/llama_stack_client/resources/completions.py

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -76,9 +76,10 @@ def create(
7676
extra_body: Body | None = None,
7777
timeout: float | httpx.Timeout | None | NotGiven = not_given,
7878
) -> CompletionCreateResponse:
79-
"""
80-
Generate an OpenAI-compatible completion for the given prompt using the
81-
specified model.
79+
"""Create completion.
80+
81+
Generate an OpenAI-compatible completion for the given prompt
82+
using the specified model.
8283
8384
Args:
8485
model: The identifier of the model to use. The model must be registered with Llama
@@ -159,9 +160,10 @@ def create(
159160
extra_body: Body | None = None,
160161
timeout: float | httpx.Timeout | None | NotGiven = not_given,
161162
) -> Stream[CompletionCreateResponse]:
162-
"""
163-
Generate an OpenAI-compatible completion for the given prompt using the
164-
specified model.
163+
"""Create completion.
164+
165+
Generate an OpenAI-compatible completion for the given prompt
166+
using the specified model.
165167
166168
Args:
167169
model: The identifier of the model to use. The model must be registered with Llama
@@ -242,9 +244,10 @@ def create(
242244
extra_body: Body | None = None,
243245
timeout: float | httpx.Timeout | None | NotGiven = not_given,
244246
) -> CompletionCreateResponse | Stream[CompletionCreateResponse]:
245-
"""
246-
Generate an OpenAI-compatible completion for the given prompt using the
247-
specified model.
247+
"""Create completion.
248+
249+
Generate an OpenAI-compatible completion for the given prompt
250+
using the specified model.
248251
249252
Args:
250253
model: The identifier of the model to use. The model must be registered with Llama
@@ -414,9 +417,10 @@ async def create(
414417
extra_body: Body | None = None,
415418
timeout: float | httpx.Timeout | None | NotGiven = not_given,
416419
) -> CompletionCreateResponse:
417-
"""
418-
Generate an OpenAI-compatible completion for the given prompt using the
419-
specified model.
420+
"""Create completion.
421+
422+
Generate an OpenAI-compatible completion for the given prompt
423+
using the specified model.
420424
421425
Args:
422426
model: The identifier of the model to use. The model must be registered with Llama
@@ -497,9 +501,10 @@ async def create(
497501
extra_body: Body | None = None,
498502
timeout: float | httpx.Timeout | None | NotGiven = not_given,
499503
) -> AsyncStream[CompletionCreateResponse]:
500-
"""
501-
Generate an OpenAI-compatible completion for the given prompt using the
502-
specified model.
504+
"""Create completion.
505+
506+
Generate an OpenAI-compatible completion for the given prompt
507+
using the specified model.
503508
504509
Args:
505510
model: The identifier of the model to use. The model must be registered with Llama
@@ -580,9 +585,10 @@ async def create(
580585
extra_body: Body | None = None,
581586
timeout: float | httpx.Timeout | None | NotGiven = not_given,
582587
) -> CompletionCreateResponse | AsyncStream[CompletionCreateResponse]:
583-
"""
584-
Generate an OpenAI-compatible completion for the given prompt using the
585-
specified model.
588+
"""Create completion.
589+
590+
Generate an OpenAI-compatible completion for the given prompt
591+
using the specified model.
586592
587593
Args:
588594
model: The identifier of the model to use. The model must be registered with Llama

0 commit comments

Comments
 (0)