Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
260 changes: 70 additions & 190 deletions src/llama_stack_client/_client.py

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions src/llama_stack_client/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,14 +179,14 @@ def __str__(self) -> str:
@classmethod
@override
def construct( # pyright: ignore[reportIncompatibleMethodOverride]
cls: Type[ModelT],
__cls: Type[ModelT],
_fields_set: set[str] | None = None,
**values: object,
) -> ModelT:
m = cls.__new__(cls)
m = __cls.__new__(__cls)
fields_values: dict[str, object] = {}

config = get_model_config(cls)
config = get_model_config(__cls)
populate_by_name = (
config.allow_population_by_field_name
if isinstance(config, _ConfigProtocol)
Expand All @@ -196,7 +196,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
if _fields_set is None:
_fields_set = set()

model_fields = get_model_fields(cls)
model_fields = get_model_fields(__cls)
for name, field in model_fields.items():
key = field.alias
if key is None or (key not in values and populate_by_name):
Expand Down
1 change: 1 addition & 0 deletions src/llama_stack_client/types/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
ReturnType as ReturnType,
AgentConfig as AgentConfig,
UserMessage as UserMessage,
ContentDelta as ContentDelta,
ScoringResult as ScoringResult,
SystemMessage as SystemMessage,
SamplingParams as SamplingParams,
Expand Down
23 changes: 3 additions & 20 deletions src/llama_stack_client/types/agents/turn_create_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
from .turn import Turn
from ..._models import BaseModel
from ..inference_step import InferenceStep
from ..shared.tool_call import ToolCall
from ..shield_call_step import ShieldCallStep
from ..tool_execution_step import ToolExecutionStep
from ..shared.content_delta import ContentDelta
from ..memory_retrieval_step import MemoryRetrievalStep

__all__ = [
Expand All @@ -18,8 +18,6 @@
"AgentTurnResponseStreamChunkEventPayload",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepStartPayload",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayload",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDelta",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepCompletePayload",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepCompletePayloadStepDetails",
"AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseTurnStartPayload",
Expand All @@ -37,30 +35,15 @@ class AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepStartPayload(
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None


AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent: TypeAlias = Union[
str, ToolCall
]


class AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDelta(BaseModel):
content: AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDeltaContent

parse_status: Literal["started", "in_progress", "failure", "success"]


class AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayload(BaseModel):
delta: ContentDelta

event_type: Literal["step_progress"]

step_id: str

step_type: Literal["inference", "tool_execution", "shield_call", "memory_retrieval"]

text_delta: Optional[str] = None

tool_call_delta: Optional[
AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepProgressPayloadToolCallDelta
] = None


AgentTurnResponseStreamChunkEventPayloadAgentTurnResponseStepCompletePayloadStepDetails: TypeAlias = Union[
InferenceStep, ToolExecutionStep, ShieldCallStep, MemoryRetrievalStep
Expand Down
20 changes: 2 additions & 18 deletions src/llama_stack_client/types/inference_chat_completion_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from .._models import BaseModel
from .token_log_probs import TokenLogProbs
from .shared.tool_call import ToolCall
from .shared.content_delta import ContentDelta
from .shared.interleaved_content import InterleavedContent

__all__ = [
Expand All @@ -14,9 +15,6 @@
"ChatCompletionResponseCompletionMessage",
"ChatCompletionResponseStreamChunk",
"ChatCompletionResponseStreamChunkEvent",
"ChatCompletionResponseStreamChunkEventDelta",
"ChatCompletionResponseStreamChunkEventDeltaToolCallDelta",
"ChatCompletionResponseStreamChunkEventDeltaToolCallDeltaContent",
]


Expand All @@ -36,22 +34,8 @@ class ChatCompletionResponse(BaseModel):
logprobs: Optional[List[TokenLogProbs]] = None


ChatCompletionResponseStreamChunkEventDeltaToolCallDeltaContent: TypeAlias = Union[str, ToolCall]


class ChatCompletionResponseStreamChunkEventDeltaToolCallDelta(BaseModel):
content: ChatCompletionResponseStreamChunkEventDeltaToolCallDeltaContent

parse_status: Literal["started", "in_progress", "failure", "success"]


ChatCompletionResponseStreamChunkEventDelta: TypeAlias = Union[
str, ChatCompletionResponseStreamChunkEventDeltaToolCallDelta
]


class ChatCompletionResponseStreamChunkEvent(BaseModel):
delta: ChatCompletionResponseStreamChunkEventDelta
delta: ContentDelta

event_type: Literal["start", "complete", "progress"]

Expand Down
1 change: 1 addition & 0 deletions src/llama_stack_client/types/shared/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from .return_type import ReturnType as ReturnType
from .agent_config import AgentConfig as AgentConfig
from .user_message import UserMessage as UserMessage
from .content_delta import ContentDelta as ContentDelta
from .scoring_result import ScoringResult as ScoringResult
from .system_message import SystemMessage as SystemMessage
from .sampling_params import SamplingParams as SamplingParams
Expand Down
35 changes: 35 additions & 0 deletions src/llama_stack_client/types/shared/content_delta.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
from typing_extensions import Literal, TypeAlias

from ..._models import BaseModel
from .tool_call import ToolCall

__all__ = ["ContentDelta", "UnionMember0", "UnionMember1", "ToolCallDelta", "ToolCallDeltaContent"]


class UnionMember0(BaseModel):
    """Text variant of a streamed content delta.

    NOTE(review): file is generated from the OpenAPI spec by Stainless;
    change the spec, not this file.
    """

    # Incremental chunk of generated text.
    text: str

    # Discriminator tag identifying this union member.
    type: Literal["text"]


class UnionMember1(BaseModel):
    """Image variant of a streamed content delta.

    NOTE(review): file is generated from the OpenAPI spec by Stainless;
    change the spec, not this file.
    """

    # Image payload delivered as a string — presumably base64-encoded
    # bytes; the spec does not state the encoding here. TODO confirm.
    data: str

    # Discriminator tag identifying this union member.
    type: Literal["image"]


ToolCallDeltaContent: TypeAlias = Union[str, ToolCall]


class ToolCallDelta(BaseModel):
    """Tool-call variant of a streamed content delta.

    NOTE(review): file is generated from the OpenAPI spec by Stainless;
    change the spec, not this file.
    """

    # Raw text or a parsed ToolCall, depending on parse progress.
    content: ToolCallDeltaContent

    # Server-reported progress of parsing the streamed tool call.
    parse_status: Literal["started", "in_progress", "failed", "succeeded"]

    # Discriminator tag identifying this union member.
    type: Literal["tool_call"]


ContentDelta: TypeAlias = Union[UnionMember0, UnionMember1, ToolCallDelta]
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,6 @@ class ToolResponseMessage(BaseModel):

content: InterleavedContent

role: Literal["ipython"]
role: Literal["tool"]

tool_name: Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,6 @@ class ToolResponseMessage(TypedDict, total=False):

content: Required[InterleavedContent]

role: Required[Literal["ipython"]]
role: Required[Literal["tool"]]

tool_name: Required[Union[Literal["brave_search", "wolfram_alpha", "photogen", "code_interpreter"], str]]
13 changes: 0 additions & 13 deletions src/llama_stack_client/types/tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,16 +38,3 @@ class Tool(BaseModel):
type: Literal["tool"]

metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None

tool_prompt_format: Optional[Literal["json", "function_tag", "python_list"]] = None
"""
`json` -- Refers to the json format for calling tools. The json format takes the
form like { "type": "function", "function" : { "name": "function_name",
"description": "function_description", "parameters": {...} } }

`function_tag` -- This is an example of how you could define your own user
defined format for making tool calls. The function_tag format looks like this,
<function=function_name>(parameters)</function>

The detailed prompts for each of these formats are added to llama cli
"""
14 changes: 0 additions & 14 deletions src/llama_stack_client/types/tool_def.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from typing_extensions import Literal

from .._models import BaseModel

Expand All @@ -28,16 +27,3 @@ class ToolDef(BaseModel):
metadata: Optional[Dict[str, Union[bool, float, str, List[object], object, None]]] = None

parameters: Optional[List[Parameter]] = None

tool_prompt_format: Optional[Literal["json", "function_tag", "python_list"]] = None
"""
`json` -- Refers to the json format for calling tools. The json format takes the
form like { "type": "function", "function" : { "name": "function_name",
"description": "function_description", "parameters": {...} } }

`function_tag` -- This is an example of how you could define your own user
defined format for making tool calls. The function_tag format looks like this,
<function=function_name>(parameters)</function>

The detailed prompts for each of these formats are added to llama cli
"""
15 changes: 1 addition & 14 deletions src/llama_stack_client/types/tool_def_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from __future__ import annotations

from typing import Dict, Union, Iterable
from typing_extensions import Literal, Required, TypedDict
from typing_extensions import Required, TypedDict

__all__ = ["ToolDefParam", "Parameter"]

Expand All @@ -28,16 +28,3 @@ class ToolDefParam(TypedDict, total=False):
metadata: Dict[str, Union[bool, float, str, Iterable[object], object, None]]

parameters: Iterable[Parameter]

tool_prompt_format: Literal["json", "function_tag", "python_list"]
"""
`json` -- Refers to the json format for calling tools. The json format takes the
form like { "type": "function", "function" : { "name": "function_name",
"description": "function_description", "parameters": {...} } }

`function_tag` -- This is an example of how you could define your own user
defined format for making tool calls. The function_tag format looks like this,
<function=function_name>(parameters)</function>

The detailed prompts for each of these formats are added to llama cli
"""
2 changes: 0 additions & 2 deletions tests/api_resources/test_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
"default": True,
}
],
"tool_prompt_format": "json",
}
],
"input_shields": ["string"],
Expand Down Expand Up @@ -185,7 +184,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
"default": True,
}
],
"tool_prompt_format": "json",
}
],
"input_shields": ["string"],
Expand Down
10 changes: 10 additions & 0 deletions tests/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -844,3 +844,13 @@ class Model(BaseModel):
assert m.alias == "foo"
assert isinstance(m.union, str)
assert m.union == "bar"


@pytest.mark.skipif(not PYDANTIC_V2, reason="construct() with a field named `cls` is only verified under Pydantic v2")
def test_field_named_cls() -> None:
    """Regression test for models declaring a field literally named ``cls``.

    ``construct()`` takes its class argument as ``__cls`` (rather than
    ``cls``) precisely so that user models may define a ``cls`` field
    without colliding with the classmethod's first parameter.
    """

    class Model(BaseModel):
        cls: str

    m = construct_type(value={"cls": "foo"}, type_=Model)
    assert isinstance(m, Model)
    assert isinstance(m.cls, str)
    # The supplied value must survive construction, not just satisfy the type.
    assert m.cls == "foo"
Loading