Skip to content

Commit f93158c

Browse files
authored
Sync updates from stainless branch: ashwinb/dev (#101)
Simplifying the OpenAPI spec a bunch more. The Stainless config changes are here: stainless-sdks/llama-stack-config@main...ashwinb/dev
1 parent f5d4cfe commit f93158c

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

43 files changed

+658
-775
lines changed

src/llama_stack_client/resources/eval/eval.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
from ...types.job import Job
3333
from ..._base_client import make_request_options
3434
from ...types.evaluate_response import EvaluateResponse
35+
from ...types.eval_task_config_param import EvalTaskConfigParam
3536

3637
__all__ = ["EvalResource", "AsyncEvalResource"]
3738

@@ -66,7 +67,7 @@ def evaluate_rows(
6667
*,
6768
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
6869
scoring_functions: List[str],
69-
task_config: eval_evaluate_rows_params.TaskConfig,
70+
task_config: EvalTaskConfigParam,
7071
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
7172
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
7273
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -117,7 +118,7 @@ def run_eval(
117118
self,
118119
task_id: str,
119120
*,
120-
task_config: eval_run_eval_params.TaskConfig,
121+
task_config: EvalTaskConfigParam,
121122
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
122123
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
123124
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -188,7 +189,7 @@ async def evaluate_rows(
188189
*,
189190
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
190191
scoring_functions: List[str],
191-
task_config: eval_evaluate_rows_params.TaskConfig,
192+
task_config: EvalTaskConfigParam,
192193
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
193194
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
194195
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -239,7 +240,7 @@ async def run_eval(
239240
self,
240241
task_id: str,
241242
*,
242-
task_config: eval_run_eval_params.TaskConfig,
243+
task_config: EvalTaskConfigParam,
243244
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
244245
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
245246
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.

src/llama_stack_client/resources/inference.py

Lines changed: 17 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
from .._streaming import Stream, AsyncStream
3131
from .._base_client import make_request_options
3232
from ..types.embeddings_response import EmbeddingsResponse
33+
from ..types.response_format_param import ResponseFormatParam
3334
from ..types.shared_params.message import Message
3435
from ..types.inference_completion_response import InferenceCompletionResponse
3536
from ..types.shared_params.sampling_params import SamplingParams
@@ -66,7 +67,7 @@ def chat_completion(
6667
messages: Iterable[Message],
6768
model_id: str,
6869
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
69-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
70+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
7071
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
7172
stream: Literal[False] | NotGiven = NOT_GIVEN,
7273
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -111,7 +112,7 @@ def chat_completion(
111112
model_id: str,
112113
stream: Literal[True],
113114
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
114-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
115+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
115116
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
116117
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
117118
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -155,7 +156,7 @@ def chat_completion(
155156
model_id: str,
156157
stream: bool,
157158
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
158-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
159+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
159160
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
160161
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
161162
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -198,7 +199,7 @@ def chat_completion(
198199
messages: Iterable[Message],
199200
model_id: str,
200201
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
201-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
202+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
202203
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
203204
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
204205
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -259,7 +260,7 @@ def completion(
259260
content: InterleavedContent,
260261
model_id: str,
261262
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
262-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
263+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
263264
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
264265
stream: Literal[False] | NotGiven = NOT_GIVEN,
265266
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -291,7 +292,7 @@ def completion(
291292
model_id: str,
292293
stream: Literal[True],
293294
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
294-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
295+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
295296
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
296297
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
297298
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -322,7 +323,7 @@ def completion(
322323
model_id: str,
323324
stream: bool,
324325
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
325-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
326+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
326327
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
327328
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
328329
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -352,7 +353,7 @@ def completion(
352353
content: InterleavedContent,
353354
model_id: str,
354355
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
355-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
356+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
356357
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
357358
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
358359
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -476,7 +477,7 @@ async def chat_completion(
476477
messages: Iterable[Message],
477478
model_id: str,
478479
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
479-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
480+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
480481
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
481482
stream: Literal[False] | NotGiven = NOT_GIVEN,
482483
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -521,7 +522,7 @@ async def chat_completion(
521522
model_id: str,
522523
stream: Literal[True],
523524
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
524-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
525+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
525526
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
526527
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
527528
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -565,7 +566,7 @@ async def chat_completion(
565566
model_id: str,
566567
stream: bool,
567568
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
568-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
569+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
569570
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
570571
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
571572
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -608,7 +609,7 @@ async def chat_completion(
608609
messages: Iterable[Message],
609610
model_id: str,
610611
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
611-
response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
612+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
612613
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
613614
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
614615
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -669,7 +670,7 @@ async def completion(
669670
content: InterleavedContent,
670671
model_id: str,
671672
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
672-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
673+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
673674
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
674675
stream: Literal[False] | NotGiven = NOT_GIVEN,
675676
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -701,7 +702,7 @@ async def completion(
701702
model_id: str,
702703
stream: Literal[True],
703704
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
704-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
705+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
705706
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
706707
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
707708
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -732,7 +733,7 @@ async def completion(
732733
model_id: str,
733734
stream: bool,
734735
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
735-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
736+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
736737
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
737738
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
738739
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -762,7 +763,7 @@ async def completion(
762763
content: InterleavedContent,
763764
model_id: str,
764765
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
765-
response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
766+
response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
766767
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
767768
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
768769
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,

src/llama_stack_client/resources/post_training/post_training.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,10 @@
1414
JobResourceWithStreamingResponse,
1515
AsyncJobResourceWithStreamingResponse,
1616
)
17-
from ...types import post_training_preference_optimize_params, post_training_supervised_fine_tune_params
17+
from ...types import (
18+
post_training_preference_optimize_params,
19+
post_training_supervised_fine_tune_params,
20+
)
1821
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
1922
from ..._utils import (
2023
maybe_transform,
@@ -31,6 +34,7 @@
3134
)
3235
from ..._base_client import make_request_options
3336
from ...types.post_training_job import PostTrainingJob
37+
from ...types.algorithm_config_param import AlgorithmConfigParam
3438

3539
__all__ = ["PostTrainingResource", "AsyncPostTrainingResource"]
3640

@@ -123,7 +127,7 @@ def supervised_fine_tune(
123127
logger_config: Dict[str, Union[bool, float, str, Iterable[object], object, None]],
124128
model: str,
125129
training_config: post_training_supervised_fine_tune_params.TrainingConfig,
126-
algorithm_config: post_training_supervised_fine_tune_params.AlgorithmConfig | NotGiven = NOT_GIVEN,
130+
algorithm_config: AlgorithmConfigParam | NotGiven = NOT_GIVEN,
127131
checkpoint_dir: str | NotGiven = NOT_GIVEN,
128132
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
129133
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -262,7 +266,7 @@ async def supervised_fine_tune(
262266
logger_config: Dict[str, Union[bool, float, str, Iterable[object], object, None]],
263267
model: str,
264268
training_config: post_training_supervised_fine_tune_params.TrainingConfig,
265-
algorithm_config: post_training_supervised_fine_tune_params.AlgorithmConfig | NotGiven = NOT_GIVEN,
269+
algorithm_config: AlgorithmConfigParam | NotGiven = NOT_GIVEN,
266270
checkpoint_dir: str | NotGiven = NOT_GIVEN,
267271
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
268272
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,

src/llama_stack_client/resources/scoring.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
)
2424
from .._base_client import make_request_options
2525
from ..types.scoring_score_response import ScoringScoreResponse
26+
from ..types.scoring_fn_params_param import ScoringFnParamsParam
2627
from ..types.scoring_score_batch_response import ScoringScoreBatchResponse
2728

2829
__all__ = ["ScoringResource", "AsyncScoringResource"]
@@ -52,7 +53,7 @@ def score(
5253
self,
5354
*,
5455
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
55-
scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
56+
scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
5657
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
5758
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
5859
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -101,7 +102,7 @@ def score_batch(
101102
*,
102103
dataset_id: str,
103104
save_results_dataset: bool,
104-
scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
105+
scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
105106
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
106107
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
107108
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -171,7 +172,7 @@ async def score(
171172
self,
172173
*,
173174
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
174-
scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
175+
scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
175176
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
176177
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
177178
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -220,7 +221,7 @@ async def score_batch(
220221
*,
221222
dataset_id: str,
222223
save_results_dataset: bool,
223-
scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
224+
scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
224225
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
225226
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
226227
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.

src/llama_stack_client/resources/scoring_functions.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from .._wrappers import DataWrapper
2525
from .._base_client import make_request_options
2626
from ..types.scoring_fn import ScoringFn
27+
from ..types.scoring_fn_params_param import ScoringFnParamsParam
2728
from ..types.shared_params.return_type import ReturnType
2829
from ..types.scoring_function_list_response import ScoringFunctionListResponse
2930

@@ -141,7 +142,7 @@ def register(
141142
description: str,
142143
return_type: ReturnType,
143144
scoring_fn_id: str,
144-
params: scoring_function_register_params.Params | NotGiven = NOT_GIVEN,
145+
params: ScoringFnParamsParam | NotGiven = NOT_GIVEN,
145146
provider_id: str | NotGiven = NOT_GIVEN,
146147
provider_scoring_fn_id: str | NotGiven = NOT_GIVEN,
147148
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -304,7 +305,7 @@ async def register(
304305
description: str,
305306
return_type: ReturnType,
306307
scoring_fn_id: str,
307-
params: scoring_function_register_params.Params | NotGiven = NOT_GIVEN,
308+
params: ScoringFnParamsParam | NotGiven = NOT_GIVEN,
308309
provider_id: str | NotGiven = NOT_GIVEN,
309310
provider_scoring_fn_id: str | NotGiven = NOT_GIVEN,
310311
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,

0 commit comments

Comments (0)