9 changes: 5 additions & 4 deletions src/llama_stack_client/resources/eval/eval.py
@@ -32,6 +32,7 @@
from ...types.job import Job
from ..._base_client import make_request_options
from ...types.evaluate_response import EvaluateResponse
+ from ...types.eval_task_config_param import EvalTaskConfigParam

__all__ = ["EvalResource", "AsyncEvalResource"]

@@ -66,7 +67,7 @@ def evaluate_rows(
*,
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
scoring_functions: List[str],
- task_config: eval_evaluate_rows_params.TaskConfig,
+ task_config: EvalTaskConfigParam,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -117,7 +118,7 @@ def run_eval(
self,
task_id: str,
*,
- task_config: eval_run_eval_params.TaskConfig,
+ task_config: EvalTaskConfigParam,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -188,7 +189,7 @@ async def evaluate_rows(
*,
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
scoring_functions: List[str],
- task_config: eval_evaluate_rows_params.TaskConfig,
+ task_config: EvalTaskConfigParam,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -239,7 +240,7 @@ async def run_eval(
self,
task_id: str,
*,
- task_config: eval_run_eval_params.TaskConfig,
+ task_config: EvalTaskConfigParam,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
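
Both evaluate_rows and run_eval (sync and async) now accept the shared EvalTaskConfigParam instead of the per-endpoint eval_*_params.TaskConfig aliases, so one config value can be reused across endpoints. A minimal caller sketch, assuming a benchmark-style config; the dict fields below (type, eval_candidate, sampling_params) are illustrative assumptions, not confirmed by this diff:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

# One dict now satisfies EvalTaskConfigParam for both evaluate_rows and run_eval.
# Field names are assumptions for illustration.
task_config = {
    "type": "benchmark",
    "eval_candidate": {
        "type": "model",
        "model": "Llama3.1-8B-Instruct",
        "sampling_params": {"strategy": "greedy"},
    },
}

job = client.eval.run_eval(task_id="my-eval-task", task_config=task_config)
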
33 changes: 17 additions & 16 deletions src/llama_stack_client/resources/inference.py
@@ -30,6 +30,7 @@
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.embeddings_response import EmbeddingsResponse
+ from ..types.response_format_param import ResponseFormatParam
from ..types.shared_params.message import Message
from ..types.inference_completion_response import InferenceCompletionResponse
from ..types.shared_params.sampling_params import SamplingParams
@@ -66,7 +67,7 @@ def chat_completion(
messages: Iterable[Message],
model_id: str,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -111,7 +112,7 @@ def chat_completion(
model_id: str,
stream: Literal[True],
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -155,7 +156,7 @@ def chat_completion(
model_id: str,
stream: bool,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -198,7 +199,7 @@ def chat_completion(
messages: Iterable[Message],
model_id: str,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -259,7 +260,7 @@ def completion(
content: InterleavedContent,
model_id: str,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -291,7 +292,7 @@ def completion(
model_id: str,
stream: Literal[True],
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -322,7 +323,7 @@ def completion(
model_id: str,
stream: bool,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -352,7 +353,7 @@ def completion(
content: InterleavedContent,
model_id: str,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -476,7 +477,7 @@ async def chat_completion(
messages: Iterable[Message],
model_id: str,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -521,7 +522,7 @@ async def chat_completion(
model_id: str,
stream: Literal[True],
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -565,7 +566,7 @@ async def chat_completion(
model_id: str,
stream: bool,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
tool_prompt_format: Literal["json", "function_tag", "python_list"] | NotGiven = NOT_GIVEN,
@@ -608,7 +609,7 @@ async def chat_completion(
messages: Iterable[Message],
model_id: str,
logprobs: inference_chat_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_chat_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
tool_choice: Literal["auto", "required"] | NotGiven = NOT_GIVEN,
@@ -669,7 +670,7 @@ async def completion(
content: InterleavedContent,
model_id: str,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -701,7 +702,7 @@ async def completion(
model_id: str,
stream: Literal[True],
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -732,7 +733,7 @@ async def completion(
model_id: str,
stream: bool,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -762,7 +763,7 @@ async def completion(
content: InterleavedContent,
model_id: str,
logprobs: inference_completion_params.Logprobs | NotGiven = NOT_GIVEN,
- response_format: inference_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
+ response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
sampling_params: SamplingParams | NotGiven = NOT_GIVEN,
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
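
Every chat_completion and completion overload (sync and async, streaming and not) now shares ResponseFormatParam, replacing the two per-endpoint ResponseFormat aliases. A minimal caller sketch, assuming a JSON-schema shape for the param; the exact fields of ResponseFormatParam are not shown in this diff:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

# Assumed shape for ResponseFormatParam; adjust to the published type.
response_format = {
    "type": "json_schema",
    "json_schema": {"type": "object", "properties": {"answer": {"type": "string"}}},
}

response = client.inference.chat_completion(
    model_id="Llama3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Reply in JSON."}],
    response_format=response_format,
)
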
10 changes: 7 additions & 3 deletions src/llama_stack_client/resources/post_training/post_training.py
@@ -14,7 +14,10 @@
JobResourceWithStreamingResponse,
AsyncJobResourceWithStreamingResponse,
)
- from ...types import post_training_preference_optimize_params, post_training_supervised_fine_tune_params
+ from ...types import (
+     post_training_preference_optimize_params,
+     post_training_supervised_fine_tune_params,
+ )
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import (
maybe_transform,
@@ -31,6 +34,7 @@
)
from ..._base_client import make_request_options
from ...types.post_training_job import PostTrainingJob
+ from ...types.algorithm_config_param import AlgorithmConfigParam

__all__ = ["PostTrainingResource", "AsyncPostTrainingResource"]

@@ -123,7 +127,7 @@ def supervised_fine_tune(
logger_config: Dict[str, Union[bool, float, str, Iterable[object], object, None]],
model: str,
training_config: post_training_supervised_fine_tune_params.TrainingConfig,
- algorithm_config: post_training_supervised_fine_tune_params.AlgorithmConfig | NotGiven = NOT_GIVEN,
+ algorithm_config: AlgorithmConfigParam | NotGiven = NOT_GIVEN,
checkpoint_dir: str | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
@@ -262,7 +266,7 @@ async def supervised_fine_tune(
logger_config: Dict[str, Union[bool, float, str, Iterable[object], object, None]],
model: str,
training_config: post_training_supervised_fine_tune_params.TrainingConfig,
- algorithm_config: post_training_supervised_fine_tune_params.AlgorithmConfig | NotGiven = NOT_GIVEN,
+ algorithm_config: AlgorithmConfigParam | NotGiven = NOT_GIVEN,
checkpoint_dir: str | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
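
supervised_fine_tune now takes the shared AlgorithmConfigParam for its optional algorithm_config. A hedged sketch of a call, assuming a LoRA-style config; fields marked below as assumed are not visible in this diff:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

job = client.post_training.supervised_fine_tune(
    job_uuid="ft-job-001",            # assumed required field, not shown in this excerpt
    model="Llama3.1-8B-Instruct",
    training_config={"n_epochs": 1},  # TrainingConfig shape assumed for illustration
    hyperparam_search_config={},      # assumed field, not shown in this excerpt
    logger_config={},
    algorithm_config={                # AlgorithmConfigParam; field names assumed
        "type": "LoRA",
        "rank": 8,
        "alpha": 16,
    },
)
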
9 changes: 5 additions & 4 deletions src/llama_stack_client/resources/scoring.py
@@ -23,6 +23,7 @@
)
from .._base_client import make_request_options
from ..types.scoring_score_response import ScoringScoreResponse
+ from ..types.scoring_fn_params_param import ScoringFnParamsParam
from ..types.scoring_score_batch_response import ScoringScoreBatchResponse

__all__ = ["ScoringResource", "AsyncScoringResource"]
@@ -52,7 +53,7 @@ def score(
self,
*,
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
- scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
+ scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -101,7 +102,7 @@ def score_batch(
*,
dataset_id: str,
save_results_dataset: bool,
- scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
+ scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -171,7 +172,7 @@ async def score(
self,
*,
input_rows: Iterable[Dict[str, Union[bool, float, str, Iterable[object], object, None]]],
- scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
+ scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -220,7 +221,7 @@ async def score_batch(
*,
dataset_id: str,
save_results_dataset: bool,
- scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
+ scoring_functions: Dict[str, Optional[ScoringFnParamsParam]],
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
x_llama_stack_provider_data: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
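
score and score_batch now type the per-function parameters as Optional[ScoringFnParamsParam], keyed by scoring-function id. A minimal sketch; the function ids and the llm_as_judge field names are illustrative assumptions:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

result = client.scoring.score(
    input_rows=[{"input_query": "What is 2 + 2?", "generated_answer": "4"}],
    scoring_functions={
        # None means: use the function's registered default params.
        "basic::equality": None,
        # ScoringFnParamsParam; field names assumed for illustration.
        "llm-as-judge::base": {
            "type": "llm_as_judge",
            "judge_model": "Llama3.1-70B-Instruct",
        },
    },
)
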
5 changes: 3 additions & 2 deletions src/llama_stack_client/resources/scoring_functions.py
@@ -24,6 +24,7 @@
from .._wrappers import DataWrapper
from .._base_client import make_request_options
from ..types.scoring_fn import ScoringFn
+ from ..types.scoring_fn_params_param import ScoringFnParamsParam
from ..types.shared_params.return_type import ReturnType
from ..types.scoring_function_list_response import ScoringFunctionListResponse

@@ -141,7 +142,7 @@ def register(
description: str,
return_type: ReturnType,
scoring_fn_id: str,
- params: scoring_function_register_params.Params | NotGiven = NOT_GIVEN,
+ params: ScoringFnParamsParam | NotGiven = NOT_GIVEN,
provider_id: str | NotGiven = NOT_GIVEN,
provider_scoring_fn_id: str | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
@@ -304,7 +305,7 @@ async def register(
description: str,
return_type: ReturnType,
scoring_fn_id: str,
- params: scoring_function_register_params.Params | NotGiven = NOT_GIVEN,
+ params: ScoringFnParamsParam | NotGiven = NOT_GIVEN,
provider_id: str | NotGiven = NOT_GIVEN,
provider_scoring_fn_id: str | NotGiven = NOT_GIVEN,
x_llama_stack_client_version: str | NotGiven = NOT_GIVEN,
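
register reuses the same ScoringFnParamsParam for its optional params, so the defaults registered here match what scoring.score accepts per call. A hedged sketch; the regex_parser shape and the return_type dict are assumptions:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

client.scoring_functions.register(
    scoring_fn_id="my::answer-match",
    description="Checks the generated answer against a regex.",
    return_type={"type": "number"},   # ReturnType shape assumed for illustration
    # ScoringFnParamsParam; field names assumed for illustration.
    params={"type": "regex_parser", "parsing_regexes": ["answer:\\s*(\\w+)"]},
)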