Skip to content

Commit ef00216

Browse files
release: 2.10.0 (#2767)
* chore(internal): update docstring * fix(types): allow pyright to infer TypedDict types within SequenceNotStr * chore: add missing docstrings * feat(api): make model required for the responses/compact endpoint * release: 2.10.0 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent dc76021 commit ef00216

File tree

519 files changed

+3370
-136
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

519 files changed

+3370
-136
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "2.9.0"
2+
".": "2.10.0"
33
}

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 137
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8a79e6fd407e6c9afec60971f03076b65f711ccd6ea16457933b0e24fb1f6d.yml
3-
openapi_spec_hash: 38c0a73f4e08843732c5f8002a809104
4-
config_hash: 2c350086d87a4b4532077363087840e7
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-88d85ff87ad8983262af2b729762a6e05fd509468bb691529bc2f81e4ce27c69.yml
3+
openapi_spec_hash: 46a55acbccd0147534017b92c1f4dd99
4+
config_hash: 141b101c9f13b90e21af74e1686f1f41

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,24 @@
11
# Changelog
22

3+
## 2.10.0 (2025-12-10)
4+
5+
Full Changelog: [v2.9.0...v2.10.0](https://github.com/openai/openai-python/compare/v2.9.0...v2.10.0)
6+
7+
### Features
8+
9+
* **api:** make model required for the responses/compact endpoint ([a12936b](https://github.com/openai/openai-python/commit/a12936b18cf19009d4e6d586c9b1958359636dbe))
10+
11+
12+
### Bug Fixes
13+
14+
* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([8f0d230](https://github.com/openai/openai-python/commit/8f0d23066c1edc38a6e9858b054dceaf92ae001b))
15+
16+
17+
### Chores
18+
19+
* add missing docstrings ([f20a9a1](https://github.com/openai/openai-python/commit/f20a9a18a421ba69622c77ab539509d218e774eb))
20+
* **internal:** update docstring ([9a993f2](https://github.com/openai/openai-python/commit/9a993f2261b6524aa30b955e006c7ea89f086968))
21+
322
## 2.9.0 (2025-12-04)
423

524
Full Changelog: [v2.8.1...v2.9.0](https://github.com/openai/openai-python/compare/v2.8.1...v2.9.0)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "2.9.0"
3+
version = "2.10.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

src/openai/_types.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -247,6 +247,9 @@ class HttpxSendArgs(TypedDict, total=False):
247247
if TYPE_CHECKING:
248248
# This works because str.__contains__ does not accept object (either in typeshed or at runtime)
249249
# https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
250+
#
251+
# Note: index() and count() methods are intentionally omitted to allow pyright to properly
252+
# infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
250253
class SequenceNotStr(Protocol[_T_co]):
251254
@overload
252255
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@@ -255,8 +258,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
255258
def __contains__(self, value: object, /) -> bool: ...
256259
def __len__(self) -> int: ...
257260
def __iter__(self) -> Iterator[_T_co]: ...
258-
def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
259-
def count(self, value: Any, /) -> int: ...
260261
def __reversed__(self) -> Iterator[_T_co]: ...
261262
else:
262263
# just point this to a normal `Sequence` at runtime to avoid having to special case

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "2.9.0" # x-release-please-version
4+
__version__ = "2.10.0" # x-release-please-version

src/openai/resources/realtime/realtime.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -232,7 +232,7 @@ def calls(self) -> AsyncCallsWithStreamingResponse:
232232

233233

234234
class AsyncRealtimeConnection:
235-
"""Represents a live websocket connection to the Realtime API"""
235+
"""Represents a live WebSocket connection to the Realtime API"""
236236

237237
session: AsyncRealtimeSessionResource
238238
response: AsyncRealtimeResponseResource
@@ -421,7 +421,7 @@ async def __aexit__(
421421

422422

423423
class RealtimeConnection:
424-
"""Represents a live websocket connection to the Realtime API"""
424+
"""Represents a live WebSocket connection to the Realtime API"""
425425

426426
session: RealtimeSessionResource
427427
response: RealtimeResponseResource

src/openai/resources/responses/responses.py

Lines changed: 20 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1526,8 +1526,6 @@ def cancel(
15261526
def compact(
15271527
self,
15281528
*,
1529-
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
1530-
instructions: Optional[str] | Omit = omit,
15311529
model: Union[
15321530
Literal[
15331531
"gpt-5.1",
@@ -1614,8 +1612,9 @@ def compact(
16141612
],
16151613
str,
16161614
None,
1617-
]
1618-
| Omit = omit,
1615+
],
1616+
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
1617+
instructions: Optional[str] | Omit = omit,
16191618
previous_response_id: Optional[str] | Omit = omit,
16201619
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
16211620
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1628,19 +1627,19 @@ def compact(
16281627
Compact conversation
16291628
16301629
Args:
1630+
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
1631+
wide range of models with different capabilities, performance characteristics,
1632+
and price points. Refer to the
1633+
[model guide](https://platform.openai.com/docs/models) to browse and compare
1634+
available models.
1635+
16311636
input: Text, image, or file inputs to the model, used to generate a response
16321637
16331638
instructions: A system (or developer) message inserted into the model's context. When used
16341639
along with `previous_response_id`, the instructions from a previous response
16351640
will not be carried over to the next response. This makes it simple to swap out
16361641
system (or developer) messages in new responses.
16371642
1638-
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
1639-
wide range of models with different capabilities, performance characteristics,
1640-
and price points. Refer to the
1641-
[model guide](https://platform.openai.com/docs/models) to browse and compare
1642-
available models.
1643-
16441643
previous_response_id: The unique ID of the previous response to the model. Use this to create
16451644
multi-turn conversations. Learn more about
16461645
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -1658,9 +1657,9 @@ def compact(
16581657
"/responses/compact",
16591658
body=maybe_transform(
16601659
{
1660+
"model": model,
16611661
"input": input,
16621662
"instructions": instructions,
1663-
"model": model,
16641663
"previous_response_id": previous_response_id,
16651664
},
16661665
response_compact_params.ResponseCompactParams,
@@ -3140,8 +3139,6 @@ async def cancel(
31403139
async def compact(
31413140
self,
31423141
*,
3143-
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
3144-
instructions: Optional[str] | Omit = omit,
31453142
model: Union[
31463143
Literal[
31473144
"gpt-5.1",
@@ -3228,8 +3225,9 @@ async def compact(
32283225
],
32293226
str,
32303227
None,
3231-
]
3232-
| Omit = omit,
3228+
],
3229+
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
3230+
instructions: Optional[str] | Omit = omit,
32333231
previous_response_id: Optional[str] | Omit = omit,
32343232
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
32353233
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -3242,19 +3240,19 @@ async def compact(
32423240
Compact conversation
32433241
32443242
Args:
3243+
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
3244+
wide range of models with different capabilities, performance characteristics,
3245+
and price points. Refer to the
3246+
[model guide](https://platform.openai.com/docs/models) to browse and compare
3247+
available models.
3248+
32453249
input: Text, image, or file inputs to the model, used to generate a response
32463250
32473251
instructions: A system (or developer) message inserted into the model's context. When used
32483252
along with `previous_response_id`, the instructions from a previous response
32493253
will not be carried over to the next response. This makes it simple to swap out
32503254
system (or developer) messages in new responses.
32513255
3252-
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
3253-
wide range of models with different capabilities, performance characteristics,
3254-
and price points. Refer to the
3255-
[model guide](https://platform.openai.com/docs/models) to browse and compare
3256-
available models.
3257-
32583256
previous_response_id: The unique ID of the previous response to the model. Use this to create
32593257
multi-turn conversations. Learn more about
32603258
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -3272,9 +3270,9 @@ async def compact(
32723270
"/responses/compact",
32733271
body=await async_maybe_transform(
32743272
{
3273+
"model": model,
32753274
"input": input,
32763275
"instructions": instructions,
3277-
"model": model,
32783276
"previous_response_id": previous_response_id,
32793277
},
32803278
response_compact_params.ResponseCompactParams,

src/openai/types/audio/transcription.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,8 @@ class Logprob(BaseModel):
2121

2222

2323
class UsageTokensInputTokenDetails(BaseModel):
24+
"""Details about the input tokens billed for this request."""
25+
2426
audio_tokens: Optional[int] = None
2527
"""Number of audio tokens billed for this request."""
2628

@@ -29,6 +31,8 @@ class UsageTokensInputTokenDetails(BaseModel):
2931

3032

3133
class UsageTokens(BaseModel):
34+
"""Usage statistics for models billed by token usage."""
35+
3236
input_tokens: int
3337
"""Number of input tokens billed for this request."""
3438

@@ -46,6 +50,8 @@ class UsageTokens(BaseModel):
4650

4751

4852
class UsageDuration(BaseModel):
53+
"""Usage statistics for models billed by audio input duration."""
54+
4955
seconds: float
5056
"""Duration of the input audio in seconds."""
5157

@@ -57,6 +63,10 @@ class UsageDuration(BaseModel):
5763

5864

5965
class Transcription(BaseModel):
66+
"""
67+
Represents a transcription response returned by the model, based on the provided input.
68+
"""
69+
6070
text: str
6171
"""The transcribed text."""
6272

src/openai/types/audio/transcription_diarized.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@
1111

1212

1313
class UsageTokensInputTokenDetails(BaseModel):
14+
"""Details about the input tokens billed for this request."""
15+
1416
audio_tokens: Optional[int] = None
1517
"""Number of audio tokens billed for this request."""
1618

@@ -19,6 +21,8 @@ class UsageTokensInputTokenDetails(BaseModel):
1921

2022

2123
class UsageTokens(BaseModel):
24+
"""Usage statistics for models billed by token usage."""
25+
2226
input_tokens: int
2327
"""Number of input tokens billed for this request."""
2428

@@ -36,6 +40,8 @@ class UsageTokens(BaseModel):
3640

3741

3842
class UsageDuration(BaseModel):
43+
"""Usage statistics for models billed by audio input duration."""
44+
3945
seconds: float
4046
"""Duration of the input audio in seconds."""
4147

@@ -47,6 +53,10 @@ class UsageDuration(BaseModel):
4753

4854

4955
class TranscriptionDiarized(BaseModel):
56+
"""
57+
Represents a diarized transcription response returned by the model, including the combined transcript and speaker-segment annotations.
58+
"""
59+
5060
duration: float
5161
"""Duration of the input audio in seconds."""
5262

0 commit comments

Comments
 (0)