From 02af5df52ce3230041e188f0bd52460937bbe3a0 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 11 Feb 2026 11:29:06 +0100 Subject: [PATCH 01/16] fix(openai-agents): Patch tool functions following library refactor --- .../integrations/openai_agents/__init__.py | 34 ++++- .../openai_agents/patches/__init__.py | 5 +- .../openai_agents/patches/tools.py | 123 ++++++++++-------- 3 files changed, 104 insertions(+), 58 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index deb136de01..b93d835dc7 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -1,8 +1,10 @@ from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.utils import parse_version from .patches import ( _create_get_model_wrapper, - _create_get_all_tools_wrapper, + _create_runner_get_all_tools_wrapper, + _create_run_loop_get_all_tools_wrapper, _create_run_wrapper, _create_run_streamed_wrapper, _patch_agent_run, @@ -17,11 +19,21 @@ # after it, even if we don't use it. import agents from agents.run import DEFAULT_AGENT_RUNNER + from agents.version import __version__ as OPENAI_AGENTS_VERSION except ImportError: raise DidNotEnable("OpenAI Agents not installed") +try: + # AgentRunner methods moved in v0.8 + # https://github.com/openai/openai-agents-python/commit/3ce7c24d349b77bb750062b7e0e856d9ff48a5d5#diff-7470b3a5c5cbe2fcbb2703dc24f326f45a5819d853be2b1f395d122d278cd911 + from agents.run_internal import run_loop, turn_preparation +except ImportError: + run_loop = None + turn_preparation = None + + def _patch_runner() -> None: # Create the root span for one full agent run (including eventual handoffs) # Note agents.run.DEFAULT_AGENT_RUNNER.run_sync is a wrapper around @@ -45,9 +57,15 @@ def _patch_model() -> None: ) -def _patch_tools() -> None: +def _patch_agent_runner_get_all_tools() -> None: agents.run.AgentRunner._get_all_tools = classmethod( - _create_get_all_tools_wrapper(agents.run.AgentRunner._get_all_tools), + _create_runner_get_all_tools_wrapper(agents.run.AgentRunner._get_all_tools), + ) + + +def _patch_run_get_all_tools() -> None: + agents.run.get_all_tools = _create_run_loop_get_all_tools_wrapper( + run_loop.get_all_tools ) @@ -57,6 +75,14 @@ class OpenAIAgentsIntegration(Integration): @staticmethod def setup_once() -> None: _patch_error_tracing() - _patch_tools() _patch_model() _patch_runner() + + library_version = parse_version(OPENAI_AGENTS_VERSION) + if library_version is not None and library_version >= ( + 0, + 8, + ): + _patch_run_get_all_tools() + + _patch_agent_runner_get_all_tools() diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index b53ca79e19..675f8c4fc4 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,5 +1,8 @@ from .models import _create_get_model_wrapper # noqa: F401 -from .tools import _create_get_all_tools_wrapper # noqa: F401 +from .tools import ( + _create_runner_get_all_tools_wrapper, + _create_run_loop_get_all_tools_wrapper, +) # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 from .agent_run import _patch_agent_run # noqa: F401 from .error_tracing import _patch_error_tracing # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/tools.py 
b/sentry_sdk/integrations/openai_agents/patches/tools.py index d14a3019aa..bb72949139 100644 --- a/sentry_sdk/integrations/openai_agents/patches/tools.py +++ b/sentry_sdk/integrations/openai_agents/patches/tools.py @@ -1,4 +1,4 @@ -from functools import wraps +from functools import wraps, partial from sentry_sdk.integrations import DidNotEnable @@ -7,7 +7,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any, Callable + from typing import Any, Callable, Awaitable try: import agents @@ -15,13 +15,62 @@ raise DidNotEnable("OpenAI Agents not installed") -def _create_get_all_tools_wrapper( +async def _get_all_tools( + original_get_all_tools: "Callable[..., Awaitable[list[agents.Tool]]]", + agent: "agents.Agent", + context_wrapper: "agents.RunContextWrapper", +) -> "list[agents.Tool]": + # Get the original tools + tools = await original_get_all_tools(agent, context_wrapper) + + wrapped_tools = [] + for tool in tools: + # Wrap only the function tools (for now) + if tool.__class__.__name__ != "FunctionTool": + wrapped_tools.append(tool) + continue + + # Create a new FunctionTool with our wrapped invoke method + original_on_invoke = tool.on_invoke_tool + + def create_wrapped_invoke( + current_tool: "agents.Tool", current_on_invoke: "Callable[..., Any]" + ) -> "Callable[..., Any]": + @wraps(current_on_invoke) + async def sentry_wrapped_on_invoke_tool( + *args: "Any", **kwargs: "Any" + ) -> "Any": + with execute_tool_span(current_tool, *args, **kwargs) as span: + # We can not capture exceptions in tool execution here because + # `_on_invoke_tool` is swallowing the exception here: + # https://github.com/openai/openai-agents-python/blob/main/src/agents/tool.py#L409-L422 + # And because function_tool is a decorator with `default_tool_error_function` set as a default parameter + # I was unable to monkey patch it because those are evaluated at module import time + # and the SDK is too late to patch it. I was also unable to patch `_on_invoke_tool_impl` + # because it is nested inside this import time code. As if they made it hard to patch on purpose... + result = await current_on_invoke(*args, **kwargs) + update_execute_tool_span(span, agent, current_tool, result) + + return result + + return sentry_wrapped_on_invoke_tool + + wrapped_tool = agents.FunctionTool( + name=tool.name, + description=tool.description, + params_json_schema=tool.params_json_schema, + on_invoke_tool=create_wrapped_invoke(tool, original_on_invoke), + strict_json_schema=tool.strict_json_schema, + is_enabled=tool.is_enabled, + ) + wrapped_tools.append(wrapped_tool) + + return wrapped_tools + + +def _create_runner_get_all_tools_wrapper( original_get_all_tools: "Callable[..., Any]", ) -> "Callable[..., Any]": - """ - Wraps the agents.Runner._get_all_tools method of the Runner class to wrap all function tools with Sentry instrumentation. 
- """ - @wraps( original_get_all_tools.__func__ if hasattr(original_get_all_tools, "__func__") @@ -32,51 +81,19 @@ async def wrapped_get_all_tools( agent: "agents.Agent", context_wrapper: "agents.RunContextWrapper", ) -> "list[agents.Tool]": - # Get the original tools - tools = await original_get_all_tools(agent, context_wrapper) - - wrapped_tools = [] - for tool in tools: - # Wrap only the function tools (for now) - if tool.__class__.__name__ != "FunctionTool": - wrapped_tools.append(tool) - continue - - # Create a new FunctionTool with our wrapped invoke method - original_on_invoke = tool.on_invoke_tool - - def create_wrapped_invoke( - current_tool: "agents.Tool", current_on_invoke: "Callable[..., Any]" - ) -> "Callable[..., Any]": - @wraps(current_on_invoke) - async def sentry_wrapped_on_invoke_tool( - *args: "Any", **kwargs: "Any" - ) -> "Any": - with execute_tool_span(current_tool, *args, **kwargs) as span: - # We can not capture exceptions in tool execution here because - # `_on_invoke_tool` is swallowing the exception here: - # https://github.com/openai/openai-agents-python/blob/main/src/agents/tool.py#L409-L422 - # And because function_tool is a decorator with `default_tool_error_function` set as a default parameter - # I was unable to monkey patch it because those are evaluated at module import time - # and the SDK is too late to patch it. I was also unable to patch `_on_invoke_tool_impl` - # because it is nested inside this import time code. As if they made it hard to patch on purpose... - result = await current_on_invoke(*args, **kwargs) - update_execute_tool_span(span, agent, current_tool, result) - - return result - - return sentry_wrapped_on_invoke_tool - - wrapped_tool = agents.FunctionTool( - name=tool.name, - description=tool.description, - params_json_schema=tool.params_json_schema, - on_invoke_tool=create_wrapped_invoke(tool, original_on_invoke), - strict_json_schema=tool.strict_json_schema, - is_enabled=tool.is_enabled, - ) - wrapped_tools.append(wrapped_tool) - - return wrapped_tools + return await _get_all_tools(original_get_all_tools, agent, context_wrapper) + + return wrapped_get_all_tools + + +def _create_run_loop_get_all_tools_wrapper( + original_get_all_tools: "Callable[..., Any]", +) -> "Callable[..., Any]": + @wraps(original_get_all_tools) + async def wrapped_get_all_tools( + agent: "agents.Agent", + context_wrapper: "agents.RunContextWrapper", + ) -> "list[agents.Tool]": + return await _get_all_tools(original_get_all_tools, agent, context_wrapper) return wrapped_get_all_tools From a7f08cc705fec4a69efdf24eb9836ebcad231156 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 11 Feb 2026 11:30:35 +0100 Subject: [PATCH 02/16] . 
--- sentry_sdk/integrations/openai_agents/patches/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/openai_agents/patches/tools.py b/sentry_sdk/integrations/openai_agents/patches/tools.py index bb72949139..561d9449e2 100644 --- a/sentry_sdk/integrations/openai_agents/patches/tools.py +++ b/sentry_sdk/integrations/openai_agents/patches/tools.py @@ -1,4 +1,4 @@ -from functools import wraps, partial +from functools import wraps from sentry_sdk.integrations import DidNotEnable From 3679c96648e0effdd8047b12423a80d52703f07c Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 11 Feb 2026 11:31:57 +0100 Subject: [PATCH 03/16] add early return --- sentry_sdk/integrations/openai_agents/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index b93d835dc7..4395e54eb7 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -84,5 +84,6 @@ def setup_once() -> None: 8, ): _patch_run_get_all_tools() + return _patch_agent_runner_get_all_tools() From 0047eb3c932138fd3ee972fb2b274c55e86e3e3d Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 11 Feb 2026 13:37:18 +0100 Subject: [PATCH 04/16] remove indirection --- .../integrations/openai_agents/__init__.py | 45 +++++++++++-------- .../openai_agents/patches/__init__.py | 5 +-- .../openai_agents/patches/tools.py | 31 ------------- 3 files changed, 28 insertions(+), 53 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 4395e54eb7..93e43f8bb3 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -1,10 +1,11 @@ from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.utils import parse_version +from functools import wraps + from .patches import ( _create_get_model_wrapper, - _create_runner_get_all_tools_wrapper, - _create_run_loop_get_all_tools_wrapper, + _get_all_tools, _create_run_wrapper, _create_run_streamed_wrapper, _patch_agent_run, @@ -19,6 +20,7 @@ # after it, even if we don't use it. 
import agents from agents.run import DEFAULT_AGENT_RUNNER + from agents.run import AgentRunner from agents.version import __version__ as OPENAI_AGENTS_VERSION except ImportError: @@ -28,10 +30,9 @@ try: # AgentRunner methods moved in v0.8 # https://github.com/openai/openai-agents-python/commit/3ce7c24d349b77bb750062b7e0e856d9ff48a5d5#diff-7470b3a5c5cbe2fcbb2703dc24f326f45a5819d853be2b1f395d122d278cd911 - from agents.run_internal import run_loop, turn_preparation + from agents.run_internal import run_loop except ImportError: run_loop = None - turn_preparation = None def _patch_runner() -> None: @@ -57,18 +58,6 @@ def _patch_model() -> None: ) -def _patch_agent_runner_get_all_tools() -> None: - agents.run.AgentRunner._get_all_tools = classmethod( - _create_runner_get_all_tools_wrapper(agents.run.AgentRunner._get_all_tools), - ) - - -def _patch_run_get_all_tools() -> None: - agents.run.get_all_tools = _create_run_loop_get_all_tools_wrapper( - run_loop.get_all_tools - ) - - class OpenAIAgentsIntegration(Integration): identifier = "openai_agents" @@ -83,7 +72,27 @@ def setup_once() -> None: 0, 8, ): - _patch_run_get_all_tools() + + @wraps(run_loop.get_all_tools) + async def new_wrapped_get_all_tools( + agent: "agents.Agent", + context_wrapper: "agents.RunContextWrapper", + ) -> "list[agents.Tool]": + return await _get_all_tools( + run_loop.get_all_tools, agent, context_wrapper + ) + + agents.run.get_all_tools = new_wrapped_get_all_tools return - _patch_agent_runner_get_all_tools() + original_get_all_tools = AgentRunner._get_all_tools + + @wraps(AgentRunner._get_all_tools.__func__) + async def old_wrapped_get_all_tools( + cls: "agents.Runner", + agent: "agents.Agent", + context_wrapper: "agents.RunContextWrapper", + ) -> "list[agents.Tool]": + return await _get_all_tools(original_get_all_tools, agent, context_wrapper) + + agents.run.AgentRunner._get_all_tools = classmethod(old_wrapped_get_all_tools) diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index 675f8c4fc4..ab3948bdc1 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,8 +1,5 @@ from .models import _create_get_model_wrapper # noqa: F401 -from .tools import ( - _create_runner_get_all_tools_wrapper, - _create_run_loop_get_all_tools_wrapper, -) # noqa: F401 +from .tools import _get_all_tools # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 from .agent_run import _patch_agent_run # noqa: F401 from .error_tracing import _patch_error_tracing # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/tools.py b/sentry_sdk/integrations/openai_agents/patches/tools.py index 561d9449e2..7674c24a8d 100644 --- a/sentry_sdk/integrations/openai_agents/patches/tools.py +++ b/sentry_sdk/integrations/openai_agents/patches/tools.py @@ -66,34 +66,3 @@ async def sentry_wrapped_on_invoke_tool( wrapped_tools.append(wrapped_tool) return wrapped_tools - - -def _create_runner_get_all_tools_wrapper( - original_get_all_tools: "Callable[..., Any]", -) -> "Callable[..., Any]": - @wraps( - original_get_all_tools.__func__ - if hasattr(original_get_all_tools, "__func__") - else original_get_all_tools - ) - async def wrapped_get_all_tools( - cls: "agents.Runner", - agent: "agents.Agent", - context_wrapper: "agents.RunContextWrapper", - ) -> "list[agents.Tool]": - return await _get_all_tools(original_get_all_tools, agent, context_wrapper) - - 
return wrapped_get_all_tools - - -def _create_run_loop_get_all_tools_wrapper( - original_get_all_tools: "Callable[..., Any]", -) -> "Callable[..., Any]": - @wraps(original_get_all_tools) - async def wrapped_get_all_tools( - agent: "agents.Agent", - context_wrapper: "agents.RunContextWrapper", - ) -> "list[agents.Tool]": - return await _get_all_tools(original_get_all_tools, agent, context_wrapper) - - return wrapped_get_all_tools From b9e9d6b84c2e2e15f87f9ebf568ed82e04d0cad4 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 09:35:47 +0100 Subject: [PATCH 05/16] fix(openai-agents): Patch model functions following library refactor --- .../integrations/openai_agents/__init__.py | 21 +- .../openai_agents/patches/__init__.py | 2 +- .../openai_agents/patches/models.py | 235 +++++++++--------- 3 files changed, 124 insertions(+), 134 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index e12a9c2a65..1882732614 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -4,7 +4,7 @@ from functools import wraps from .patches import ( - _create_get_model_wrapper, + _get_model, _get_all_tools, _create_run_wrapper, _create_run_streamed_wrapper, @@ -52,12 +52,6 @@ def _patch_runner() -> None: _patch_agent_run() -def _patch_model() -> None: - agents.run.AgentRunner._get_model = classmethod( - _create_get_model_wrapper(agents.run.AgentRunner._get_model), - ) - - class OpenAIAgentsIntegration(Integration): """ NOTE: With version 0.8.0, the class methods below have been refactored to functions. @@ -73,7 +67,7 @@ class OpenAIAgentsIntegration(Integration): - `Runner.run()` and `Runner.run_streamed()` are thin wrappers for `DEFAULT_AGENT_RUNNER.run()` and `DEFAULT_AGENT_RUNNER.run_streamed()`. - `DEFAULT_AGENT_RUNNER.run()` and `DEFAULT_AGENT_RUNNER.run_streamed()` are patched in `_patch_runner()` with `_create_run_wrapper()` and `_create_run_streamed_wrapper()`, respectively. 3. In a loop, the agent repeatedly calls the Responses API, maintaining a conversation history that includes previous messages and tool results, which is passed to each call. - - A Model instance is created at the start of the loop by calling the `Runner._get_model()`. We patch the Model instance using `_create_get_model_wrapper()` in `_patch_model()`. + - A Model instance is created at the start of the loop by calling the `Runner._get_model()`. We patch the Model instance using `patches._get_model()`. - Available tools are also deteremined at the start of the loop, with `Runner._get_all_tools()`. We patch Tool instances by iterating through the returned tools in `_get_all_tools()`. - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patched_run_single_turn()` and `patched_run_single_turn_streamed()`. 4. On loop termination, `RunImpl.execute_final_output()` is called. The function is patched with `patched_execute_final_output()`. 
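For orientation before the remaining hunks: the strategy described in the docstring above reduces to a version gate in `setup_once()`. On openai-agents >= 0.8 the new module-level helpers are patched; on older releases the integration falls back to the `AgentRunner` classmethods. A condensed, illustrative sketch of that pattern (the Sentry span logic inside `_get_all_tools` is elided and the wrapper names are placeholders, not the actual SDK code):

from functools import wraps

import agents
from agents.version import __version__ as OPENAI_AGENTS_VERSION
from sentry_sdk.utils import parse_version


async def _get_all_tools(original, agent, context_wrapper):
    # Stand-in for the shared helper in patches/tools.py, which re-creates each
    # FunctionTool with a span-wrapped on_invoke_tool.
    return await original(agent, context_wrapper)


def _patch_get_all_tools():
    version = parse_version(OPENAI_AGENTS_VERSION)
    if version is not None and version >= (0, 8):
        # openai-agents >= 0.8: the runner helpers are module-level functions.
        from agents.run_internal import run_loop

        @wraps(run_loop.get_all_tools)
        async def wrapped(agent, context_wrapper):
            return await _get_all_tools(run_loop.get_all_tools, agent, context_wrapper)

        agents.run.get_all_tools = wrapped
    else:
        # Older releases: the helper is still a classmethod on AgentRunner.
        original = agents.run.AgentRunner._get_all_tools

        @wraps(original.__func__)
        async def wrapped(cls, agent, context_wrapper):
            return await _get_all_tools(original, agent, context_wrapper)

        agents.run.AgentRunner._get_all_tools = classmethod(wrapped)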
@@ -90,7 +84,6 @@ class OpenAIAgentsIntegration(Integration): @staticmethod def setup_once() -> None: _patch_error_tracing() - _patch_model() _patch_runner() library_version = parse_version(OPENAI_AGENTS_VERSION) @@ -122,3 +115,13 @@ async def old_wrapped_get_all_tools( return await _get_all_tools(original_get_all_tools, agent, context_wrapper) agents.run.AgentRunner._get_all_tools = classmethod(old_wrapped_get_all_tools) + + original_get_model = AgentRunner._get_model + + @wraps(AgentRunner._get_model.__func__) + def old_wrapped_get_model( + cls: "agents.Runner", agent: "agents.Agent", run_config: "agents.RunConfig" + ) -> "list[agents.Tool]": + return _get_model(original_get_model, agent, run_config) + + agents.run.AgentRunner._get_model = classmethod(old_wrapped_get_model) diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index ab3948bdc1..fe06200793 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,4 +1,4 @@ -from .models import _create_get_model_wrapper # noqa: F401 +from .models import _get_model # noqa: F401 from .tools import _get_all_tools # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 from .agent_run import _patch_agent_run # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/models.py b/sentry_sdk/integrations/openai_agents/patches/models.py index 5d4d71185f..6b5dceef97 100644 --- a/sentry_sdk/integrations/openai_agents/patches/models.py +++ b/sentry_sdk/integrations/openai_agents/patches/models.py @@ -66,141 +66,128 @@ def _inject_trace_propagation_headers( headers[key] = value -def _create_get_model_wrapper( - original_get_model: "Callable[..., Any]", -) -> "Callable[..., Any]": +def _get_model( + original_get_model: "Callable[..., agents.Model]", + agent: "agents.Agent", + run_config: "agents.RunConfig", +) -> "agents.Model": """ - Wraps the agents.Runner._get_model method to wrap the get_response method of the model to create a AI client span. - Responsible for - creating and managing AI client spans. - adding trace propagation headers to tools with type HostedMCPTool. - setting the response model on agent invocation spans. """ + # copy the model to double patching its methods. We use copy on purpose here (instead of deepcopy) + # because we only patch its direct methods, all underlying data can remain unchanged. 
+ model = copy.copy(original_get_model(agent, run_config)) + + # Capture the request model name for spans (agent.model can be None when using defaults) + request_model_name = model.model if hasattr(model, "model") else str(model) + agent._sentry_request_model = request_model_name + + # Wrap _fetch_response if it exists (for OpenAI models) to capture response model + if hasattr(model, "_fetch_response"): + original_fetch_response = model._fetch_response + + @wraps(original_fetch_response) + async def wrapped_fetch_response(*args: "Any", **kwargs: "Any") -> "Any": + response = await original_fetch_response(*args, **kwargs) + if hasattr(response, "model") and response.model: + agent._sentry_response_model = str(response.model) + return response + + model._fetch_response = wrapped_fetch_response + + original_get_response = model.get_response + + @wraps(original_get_response) + async def wrapped_get_response(*args: "Any", **kwargs: "Any") -> "Any": + mcp_tools = kwargs.get("tools") + hosted_tools = [] + if mcp_tools is not None: + hosted_tools = [ + tool for tool in mcp_tools if isinstance(tool, HostedMCPTool) + ] + + with ai_client_span(agent, kwargs) as span: + for hosted_tool in hosted_tools: + _inject_trace_propagation_headers(hosted_tool, span=span) + + result = await original_get_response(*args, **kwargs) + + # Get response model captured from _fetch_response and clean up + response_model = getattr(agent, "_sentry_response_model", None) + if response_model: + delattr(agent, "_sentry_response_model") + + _set_response_model_on_agent_span(agent, response_model) + update_ai_client_span(span, result, response_model, agent) + + return result + + model.get_response = wrapped_get_response + + # Also wrap stream_response for streaming support + if hasattr(model, "stream_response"): + original_stream_response = model.stream_response + + @wraps(original_stream_response) + async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any": + # Uses explicit try/finally instead of context manager to ensure cleanup + # even if the consumer abandons the stream (GeneratorExit). + span_kwargs = dict(kwargs) + if len(args) > 0: + span_kwargs["system_instructions"] = args[0] + if len(args) > 1: + span_kwargs["input"] = args[1] - @wraps( - original_get_model.__func__ - if hasattr(original_get_model, "__func__") - else original_get_model - ) - def wrapped_get_model( - cls: "agents.Runner", agent: "agents.Agent", run_config: "agents.RunConfig" - ) -> "agents.Model": - # copy the model to double patching its methods. We use copy on purpose here (instead of deepcopy) - # because we only patch its direct methods, all underlying data can remain unchanged. 
- model = copy.copy(original_get_model(agent, run_config)) - - # Capture the request model name for spans (agent.model can be None when using defaults) - request_model_name = model.model if hasattr(model, "model") else str(model) - agent._sentry_request_model = request_model_name - - # Wrap _fetch_response if it exists (for OpenAI models) to capture response model - if hasattr(model, "_fetch_response"): - original_fetch_response = model._fetch_response - - @wraps(original_fetch_response) - async def wrapped_fetch_response(*args: "Any", **kwargs: "Any") -> "Any": - response = await original_fetch_response(*args, **kwargs) - if hasattr(response, "model") and response.model: - agent._sentry_response_model = str(response.model) - return response - - model._fetch_response = wrapped_fetch_response - - original_get_response = model.get_response - - @wraps(original_get_response) - async def wrapped_get_response(*args: "Any", **kwargs: "Any") -> "Any": - mcp_tools = kwargs.get("tools") hosted_tools = [] - if mcp_tools is not None: - hosted_tools = [ - tool for tool in mcp_tools if isinstance(tool, HostedMCPTool) - ] + if len(args) > 3: + mcp_tools = args[3] + + if mcp_tools is not None: + hosted_tools = [ + tool for tool in mcp_tools if isinstance(tool, HostedMCPTool) + ] - with ai_client_span(agent, kwargs) as span: + with ai_client_span(agent, span_kwargs) as span: for hosted_tool in hosted_tools: _inject_trace_propagation_headers(hosted_tool, span=span) - result = await original_get_response(*args, **kwargs) - - # Get response model captured from _fetch_response and clean up - response_model = getattr(agent, "_sentry_response_model", None) - if response_model: - delattr(agent, "_sentry_response_model") - - _set_response_model_on_agent_span(agent, response_model) - update_ai_client_span(span, result, response_model, agent) - - return result - - model.get_response = wrapped_get_response - - # Also wrap stream_response for streaming support - if hasattr(model, "stream_response"): - original_stream_response = model.stream_response - - @wraps(original_stream_response) - async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any": - # Uses explicit try/finally instead of context manager to ensure cleanup - # even if the consumer abandons the stream (GeneratorExit). 
- span_kwargs = dict(kwargs) - if len(args) > 0: - span_kwargs["system_instructions"] = args[0] - if len(args) > 1: - span_kwargs["input"] = args[1] - - hosted_tools = [] - if len(args) > 3: - mcp_tools = args[3] - - if mcp_tools is not None: - hosted_tools = [ - tool - for tool in mcp_tools - if isinstance(tool, HostedMCPTool) - ] - - with ai_client_span(agent, span_kwargs) as span: - for hosted_tool in hosted_tools: - _inject_trace_propagation_headers(hosted_tool, span=span) - - span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) - - streaming_response = None - ttft_recorded = False - # Capture start time locally to avoid race conditions with concurrent requests - start_time = time.perf_counter() - - async for event in original_stream_response(*args, **kwargs): - # Detect first content token (text delta event) - if not ttft_recorded and hasattr(event, "delta"): - ttft = time.perf_counter() - start_time - span.set_data( - SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN, ttft - ) - ttft_recorded = True - - # Capture the full response from ResponseCompletedEvent - if hasattr(event, "response"): - streaming_response = event.response - yield event - - # Update span with response data (usage, output, model) - if streaming_response: - response_model = ( - str(streaming_response.model) - if hasattr(streaming_response, "model") - and streaming_response.model - else None - ) - _set_response_model_on_agent_span(agent, response_model) - update_ai_client_span( - span, streaming_response, response_model, agent - ) - - model.stream_response = wrapped_stream_response + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) - return model + streaming_response = None + ttft_recorded = False + # Capture start time locally to avoid race conditions with concurrent requests + start_time = time.perf_counter() - return wrapped_get_model + async for event in original_stream_response(*args, **kwargs): + # Detect first content token (text delta event) + if not ttft_recorded and hasattr(event, "delta"): + ttft = time.perf_counter() - start_time + span.set_data( + SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN, ttft + ) + ttft_recorded = True + + # Capture the full response from ResponseCompletedEvent + if hasattr(event, "response"): + streaming_response = event.response + yield event + + # Update span with response data (usage, output, model) + if streaming_response: + response_model = ( + str(streaming_response.model) + if hasattr(streaming_response, "model") + and streaming_response.model + else None + ) + _set_response_model_on_agent_span(agent, response_model) + update_ai_client_span( + span, streaming_response, response_model, agent + ) + + model.stream_response = wrapped_stream_response + + return model From 0a2ede87e8691c7b3183b34fdee44c83279c1f31 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 09:38:02 +0100 Subject: [PATCH 06/16] . --- sentry_sdk/integrations/openai_agents/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index e12a9c2a65..0c551fd9bd 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -74,7 +74,7 @@ class OpenAIAgentsIntegration(Integration): - `DEFAULT_AGENT_RUNNER.run()` and `DEFAULT_AGENT_RUNNER.run_streamed()` are patched in `_patch_runner()` with `_create_run_wrapper()` and `_create_run_streamed_wrapper()`, respectively. 3. 
In a loop, the agent repeatedly calls the Responses API, maintaining a conversation history that includes previous messages and tool results, which is passed to each call. - A Model instance is created at the start of the loop by calling the `Runner._get_model()`. We patch the Model instance using `_create_get_model_wrapper()` in `_patch_model()`. - - Available tools are also deteremined at the start of the loop, with `Runner._get_all_tools()`. We patch Tool instances by iterating through the returned tools in `_get_all_tools()`. + - Available tools are also determined at the start of the loop, with `Runner._get_all_tools()`. We patch Tool instances by iterating through the returned tools in `patches._get_all_tools()`. - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patched_run_single_turn()` and `patched_run_single_turn_streamed()`. 4. On loop termination, `RunImpl.execute_final_output()` is called. The function is patched with `patched_execute_final_output()`. From 0970319f2c156e85b15452886208d86dc61d43c5 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 09:44:12 +0100 Subject: [PATCH 07/16] . --- sentry_sdk/integrations/openai_agents/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index c106867a98..62a6da5d40 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -30,9 +30,10 @@ try: # AgentRunner methods moved in v0.8 # https://github.com/openai/openai-agents-python/commit/3ce7c24d349b77bb750062b7e0e856d9ff48a5d5#diff-7470b3a5c5cbe2fcbb2703dc24f326f45a5819d853be2b1f395d122d278cd911 - from agents.run_internal import run_loop + from agents.run_internal import run_loop, turn_preparation except ImportError: run_loop = None + turn_preparation = None def _patch_runner() -> None: @@ -102,6 +103,14 @@ async def new_wrapped_get_all_tools( ) agents.run.get_all_tools = new_wrapped_get_all_tools + + @wraps(turn_preparation.get_model) + def new_wrapped_get_model( + agent: "agents.Agent", run_config: "agents.RunConfig" + ) -> "agents.Model": + return _get_model(turn_preparation.get_model, agent, run_config) + + agents.run_internal.run_loop.get_model = new_wrapped_get_model return original_get_all_tools = AgentRunner._get_all_tools From ec2b633e3b867e28a02fa5696f6d846b84fe18f3 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 10:03:23 +0100 Subject: [PATCH 08/16] fix(openai-agents): Patch run_single_turn() following library refactor --- .../integrations/openai_agents/__init__.py | 27 ++- .../openai_agents/patches/__init__.py | 2 +- .../openai_agents/patches/agent_run.py | 164 +++++++++--------- 3 files changed, 110 insertions(+), 83 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 62a6da5d40..7d86dccbb6 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -6,6 +6,7 @@ from .patches import ( _get_model, _get_all_tools, + _run_single_turn, _create_run_wrapper, _create_run_streamed_wrapper, _patch_agent_run, @@ -35,6 +36,11 @@ run_loop = None turn_preparation = None +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Any + def _patch_runner() -> None: # Create the root span for 
one full agent run (including eventual handoffs) @@ -70,7 +76,7 @@ class OpenAIAgentsIntegration(Integration): 3. In a loop, the agent repeatedly calls the Responses API, maintaining a conversation history that includes previous messages and tool results, which is passed to each call. - A Model instance is created at the start of the loop by calling the `Runner._get_model()`. We patch the Model instance using `patches._get_model()`. - Available tools are also deteremined at the start of the loop, with `Runner._get_all_tools()`. We patch Tool instances by iterating through the returned tools in `patches._get_all_tools()`. - - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patched_run_single_turn()` and `patched_run_single_turn_streamed()`. + - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patches._run_single_turn()` and `patched_run_single_turn_streamed()`. 4. On loop termination, `RunImpl.execute_final_output()` is called. The function is patched with `patched_execute_final_output()`. Local tools are run based on the return value from the Responses API as a post-API call step in the above loop. @@ -111,6 +117,13 @@ def new_wrapped_get_model( return _get_model(turn_preparation.get_model, agent, run_config) agents.run_internal.run_loop.get_model = new_wrapped_get_model + + @wraps(run_loop.run_single_turn) + async def patched_run_single_turn(*args: "Any", **kwargs: "Any") -> "Any": + return await _run_single_turn(run_loop.run_single_turn, *args, **kwargs) + + agents.run.run_single_turn = patched_run_single_turn + return original_get_all_tools = AgentRunner._get_all_tools @@ -134,3 +147,15 @@ def old_wrapped_get_model( return _get_model(original_get_model, agent, run_config) agents.run.AgentRunner._get_model = classmethod(old_wrapped_get_model) + + original_run_single_turn = AgentRunner._run_single_turn + + @wraps(AgentRunner._run_single_turn) + async def old_wrapped_run_single_turn( + cls: "agents.Runner", *args: "Any", **kwargs: "Any" + ) -> "Any": + return await _run_single_turn(original_run_single_turn, *args, **kwargs) + + agents.run.AgentRunner._run_single_turn = classmethod( + old_wrapped_run_single_turn + ) diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index fe06200793..d471a9f35c 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,5 +1,5 @@ from .models import _get_model # noqa: F401 from .tools import _get_all_tools # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 -from .agent_run import _patch_agent_run # noqa: F401 +from .agent_run import _run_single_turn, _patch_agent_run # noqa: F401 from .error_tracing import _patch_error_tracing # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/agent_run.py b/sentry_sdk/integrations/openai_agents/patches/agent_run.py index 138151d930..b7fe7fc236 100644 --- a/sentry_sdk/integrations/openai_agents/patches/agent_run.py +++ b/sentry_sdk/integrations/openai_agents/patches/agent_run.py @@ -14,108 +14,111 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any, Optional + from typing import Any, Optional, Callable, Awaitable from sentry_sdk.tracing import Span + from agents.run_internal.run_steps import 
SingleStepResult + try: import agents except ImportError: raise DidNotEnable("OpenAI Agents not installed") -def _patch_agent_run() -> None: - """ - Patches AgentRunner methods to create agent invocation spans. - This directly patches the execution flow to track when agents start and stop. +def _has_active_agent_span(context_wrapper: "agents.RunContextWrapper") -> bool: + """Check if there's an active agent span for this context""" + return getattr(context_wrapper, "_sentry_current_agent", None) is not None + + +def _get_current_agent( + context_wrapper: "agents.RunContextWrapper", +) -> "Optional[agents.Agent]": + """Get the current agent from context wrapper""" + return getattr(context_wrapper, "_sentry_current_agent", None) + + +def _close_streaming_workflow_span(agent: "Optional[agents.Agent]") -> None: + """Close the workflow span for streaming executions if it exists.""" + if agent and hasattr(agent, "_sentry_workflow_span"): + workflow_span = agent._sentry_workflow_span + workflow_span.__exit__(*sys.exc_info()) + delattr(agent, "_sentry_workflow_span") + + +def _maybe_start_agent_span( + context_wrapper: "agents.RunContextWrapper", + agent: "agents.Agent", + should_run_agent_start_hooks: bool, + span_kwargs: "dict[str, Any]", + is_streaming: bool = False, +) -> "Optional[Span]": """ + Start an agent invocation span if conditions are met. + Handles ending any existing span for a different agent. - # Store original methods - original_run_single_turn = agents.run.AgentRunner._run_single_turn - original_run_single_turn_streamed = agents.run.AgentRunner._run_single_turn_streamed - original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs - original_execute_final_output = agents._run_impl.RunImpl.execute_final_output + Returns the new span if started, or the existing span if conditions aren't met. + """ + if not (should_run_agent_start_hooks and agent and context_wrapper): + return getattr(context_wrapper, "_sentry_agent_span", None) - def _has_active_agent_span(context_wrapper: "agents.RunContextWrapper") -> bool: - """Check if there's an active agent span for this context""" - return getattr(context_wrapper, "_sentry_current_agent", None) is not None - - def _get_current_agent( - context_wrapper: "agents.RunContextWrapper", - ) -> "Optional[agents.Agent]": - """Get the current agent from context wrapper""" - return getattr(context_wrapper, "_sentry_current_agent", None) - - def _close_streaming_workflow_span(agent: "Optional[agents.Agent]") -> None: - """Close the workflow span for streaming executions if it exists.""" - if agent and hasattr(agent, "_sentry_workflow_span"): - workflow_span = agent._sentry_workflow_span - workflow_span.__exit__(*sys.exc_info()) - delattr(agent, "_sentry_workflow_span") - - def _maybe_start_agent_span( - context_wrapper: "agents.RunContextWrapper", - agent: "agents.Agent", - should_run_agent_start_hooks: bool, - span_kwargs: "dict[str, Any]", - is_streaming: bool = False, - ) -> "Optional[Span]": - """ - Start an agent invocation span if conditions are met. - Handles ending any existing span for a different agent. + # End any existing span for a different agent + if _has_active_agent_span(context_wrapper): + current_agent = _get_current_agent(context_wrapper) + if current_agent and current_agent != agent: + end_invoke_agent_span(context_wrapper, current_agent) - Returns the new span if started, or the existing span if conditions aren't met. 
- """ - if not (should_run_agent_start_hooks and agent and context_wrapper): - return getattr(context_wrapper, "_sentry_agent_span", None) + # Store the agent on the context wrapper so we can access it later + context_wrapper._sentry_current_agent = agent + span = invoke_agent_span(context_wrapper, agent, span_kwargs) + context_wrapper._sentry_agent_span = span + agent._sentry_agent_span = span - # End any existing span for a different agent - if _has_active_agent_span(context_wrapper): - current_agent = _get_current_agent(context_wrapper) - if current_agent and current_agent != agent: - end_invoke_agent_span(context_wrapper, current_agent) + if is_streaming: + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) - # Store the agent on the context wrapper so we can access it later - context_wrapper._sentry_current_agent = agent - span = invoke_agent_span(context_wrapper, agent, span_kwargs) - context_wrapper._sentry_agent_span = span - agent._sentry_agent_span = span + return span - if is_streaming: - span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) - return span +async def _run_single_turn( + original_run_single_turn: "Callable[..., Awaitable[SingleStepResult]]", + *args: "Any", + **kwargs: "Any", +) -> "Any": + """ + Patched _run_single_turn that + - creates agent invocation spans if there is no already active agent invocation span. + - ends the agent invocation span if and only if an exception is raised in `_run_single_turn()`. + """ + agent = kwargs.get("agent") + context_wrapper = kwargs.get("context_wrapper") + should_run_agent_start_hooks = kwargs.get("should_run_agent_start_hooks", False) - @wraps( - original_run_single_turn.__func__ - if hasattr(original_run_single_turn, "__func__") - else original_run_single_turn + span = _maybe_start_agent_span( + context_wrapper, agent, should_run_agent_start_hooks, kwargs ) - async def patched_run_single_turn( - cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": - """ - Patched _run_single_turn that - - creates agent invocation spans if there is no already active agent invocation span. - - ends the agent invocation span if and only if an exception is raised in `_run_single_turn()`. - """ - agent = kwargs.get("agent") - context_wrapper = kwargs.get("context_wrapper") - should_run_agent_start_hooks = kwargs.get("should_run_agent_start_hooks", False) - span = _maybe_start_agent_span( - context_wrapper, agent, should_run_agent_start_hooks, kwargs - ) + try: + result = await original_run_single_turn(*args, **kwargs) + except Exception as exc: + if span is not None and span.timestamp is None: + _record_exception_on_span(span, exc) + end_invoke_agent_span(context_wrapper, agent) + reraise(*sys.exc_info()) - try: - result = await original_run_single_turn(*args, **kwargs) - except Exception as exc: - if span is not None and span.timestamp is None: - _record_exception_on_span(span, exc) - end_invoke_agent_span(context_wrapper, agent) - reraise(*sys.exc_info()) + return result - return result + +def _patch_agent_run() -> None: + """ + Patches AgentRunner methods to create agent invocation spans. + This directly patches the execution flow to track when agents start and stop. 
+ """ + + # Store original methods + original_run_single_turn_streamed = agents.run.AgentRunner._run_single_turn_streamed + original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs + original_execute_final_output = agents._run_impl.RunImpl.execute_final_output @wraps( original_execute_handoffs.__func__ @@ -252,7 +255,6 @@ async def patched_run_single_turn_streamed( return result # Apply patches - agents.run.AgentRunner._run_single_turn = classmethod(patched_run_single_turn) agents.run.AgentRunner._run_single_turn_streamed = classmethod( patched_run_single_turn_streamed ) From bba10b3e33d5ab891a425d5d08a3f933bb95bdee Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 10:31:59 +0100 Subject: [PATCH 09/16] fix(openai-agents): Patch run_single_turn_streamed functions following library refactor --- .../integrations/openai_agents/__init__.py | 27 +++- .../openai_agents/patches/__init__.py | 2 +- .../openai_agents/patches/agent_run.py | 122 ++++++++---------- 3 files changed, 84 insertions(+), 67 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 7d86dccbb6..d788f48fd2 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -7,6 +7,7 @@ _get_model, _get_all_tools, _run_single_turn, + _run_single_turn_streamed, _create_run_wrapper, _create_run_streamed_wrapper, _patch_agent_run, @@ -76,7 +77,7 @@ class OpenAIAgentsIntegration(Integration): 3. In a loop, the agent repeatedly calls the Responses API, maintaining a conversation history that includes previous messages and tool results, which is passed to each call. - A Model instance is created at the start of the loop by calling the `Runner._get_model()`. We patch the Model instance using `patches._get_model()`. - Available tools are also deteremined at the start of the loop, with `Runner._get_all_tools()`. We patch Tool instances by iterating through the returned tools in `patches._get_all_tools()`. - - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patches._run_single_turn()` and `patched_run_single_turn_streamed()`. + - In each loop iteration, `run_single_turn()` or `run_single_turn_streamed()` is responsible for calling the Responses API, patched with `patches._run_single_turn()` and `patches._run_single_turn_streamed()`. 4. On loop termination, `RunImpl.execute_final_output()` is called. The function is patched with `patched_execute_final_output()`. Local tools are run based on the return value from the Responses API as a post-API call step in the above loop. 
@@ -124,6 +125,16 @@ async def patched_run_single_turn(*args: "Any", **kwargs: "Any") -> "Any": agents.run.run_single_turn = patched_run_single_turn + @wraps(run_loop.run_single_turn_streamed) + async def new_wrapped_run_single_turn_streamed( + *args: "Any", **kwargs: "Any" + ) -> "Any": + return await _run_single_turn_streamed( + run_loop.run_single_turn_streamed, *args, **kwargs + ) + + agents.run.run_single_turn_streamed = new_wrapped_run_single_turn_streamed + return original_get_all_tools = AgentRunner._get_all_tools @@ -159,3 +170,17 @@ async def old_wrapped_run_single_turn( agents.run.AgentRunner._run_single_turn = classmethod( old_wrapped_run_single_turn ) + + original_run_single_turn_streamed = AgentRunner._run_single_turn_streamed + + @wraps(AgentRunner._run_single_turn_streamed) + async def old_wrapped_run_single_turn_streamed( + cls: "agents.Runner", *args: "Any", **kwargs: "Any" + ) -> "Any": + return await _run_single_turn_streamed( + original_run_single_turn_streamed, *args, **kwargs + ) + + agents.run.AgentRunner._run_single_turn_streamed = classmethod( + old_wrapped_run_single_turn_streamed + ) diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index d471a9f35c..0c06d96cc4 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,5 +1,5 @@ from .models import _get_model # noqa: F401 from .tools import _get_all_tools # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 -from .agent_run import _run_single_turn, _patch_agent_run # noqa: F401 +from .agent_run import _run_single_turn, _run_single_turn_streamed, _patch_agent_run # noqa: F401 from .error_tracing import _patch_error_tracing # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/agent_run.py b/sentry_sdk/integrations/openai_agents/patches/agent_run.py index b7fe7fc236..de3cf62f75 100644 --- a/sentry_sdk/integrations/openai_agents/patches/agent_run.py +++ b/sentry_sdk/integrations/openai_agents/patches/agent_run.py @@ -109,6 +109,63 @@ async def _run_single_turn( return result +async def _run_single_turn_streamed( + original_run_single_turn_streamed: "Callable[..., Awaitable[SingleStepResult]]", + *args: "Any", + **kwargs: "Any", +) -> "SingleStepResult": + """ + Patched _run_single_turn_streamed that + - creates agent invocation spans for streaming if there is no already active agent invocation span. + - ends the agent invocation span if and only if `_run_single_turn_streamed()` raises an exception. + + Note: Unlike _run_single_turn which uses keyword-only arguments (*,), + _run_single_turn_streamed uses positional arguments. 
The call signature is: + _run_single_turn_streamed( + streamed_result, # args[0] + agent, # args[1] + hooks, # args[2] + context_wrapper, # args[3] + run_config, # args[4] + should_run_agent_start_hooks, # args[5] + tool_use_tracker, # args[6] + all_tools, # args[7] + server_conversation_tracker, # args[8] (optional) + ) + """ + streamed_result = args[0] if len(args) > 0 else kwargs.get("streamed_result") + agent = args[1] if len(args) > 1 else kwargs.get("agent") + context_wrapper = args[3] if len(args) > 3 else kwargs.get("context_wrapper") + should_run_agent_start_hooks = bool( + args[5] if len(args) > 5 else kwargs.get("should_run_agent_start_hooks", False) + ) + + span_kwargs: "dict[str, Any]" = {} + if streamed_result and hasattr(streamed_result, "input"): + span_kwargs["original_input"] = streamed_result.input + + span = _maybe_start_agent_span( + context_wrapper, + agent, + should_run_agent_start_hooks, + span_kwargs, + is_streaming=True, + ) + + try: + result = await original_run_single_turn_streamed(*args, **kwargs) + except Exception as exc: + exc_info = sys.exc_info() + with capture_internal_exceptions(): + if span is not None and span.timestamp is None: + _record_exception_on_span(span, exc) + end_invoke_agent_span(context_wrapper, agent) + _close_streaming_workflow_span(agent) + reraise(*exc_info) + + return result + + def _patch_agent_run() -> None: """ Patches AgentRunner methods to create agent invocation spans. @@ -116,7 +173,6 @@ def _patch_agent_run() -> None: """ # Store original methods - original_run_single_turn_streamed = agents.run.AgentRunner._run_single_turn_streamed original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs original_execute_final_output = agents._run_impl.RunImpl.execute_final_output @@ -193,71 +249,7 @@ async def patched_execute_final_output( return result - @wraps( - original_run_single_turn_streamed.__func__ - if hasattr(original_run_single_turn_streamed, "__func__") - else original_run_single_turn_streamed - ) - async def patched_run_single_turn_streamed( - cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": - """ - Patched _run_single_turn_streamed that - - creates agent invocation spans for streaming if there is no already active agent invocation span. - - ends the agent invocation span if and only if `_run_single_turn_streamed()` raises an exception. - - Note: Unlike _run_single_turn which uses keyword-only arguments (*,), - _run_single_turn_streamed uses positional arguments. 
The call signature is: - _run_single_turn_streamed( - streamed_result, # args[0] - agent, # args[1] - hooks, # args[2] - context_wrapper, # args[3] - run_config, # args[4] - should_run_agent_start_hooks, # args[5] - tool_use_tracker, # args[6] - all_tools, # args[7] - server_conversation_tracker, # args[8] (optional) - ) - """ - streamed_result = args[0] if len(args) > 0 else kwargs.get("streamed_result") - agent = args[1] if len(args) > 1 else kwargs.get("agent") - context_wrapper = args[3] if len(args) > 3 else kwargs.get("context_wrapper") - should_run_agent_start_hooks = bool( - args[5] - if len(args) > 5 - else kwargs.get("should_run_agent_start_hooks", False) - ) - - span_kwargs: "dict[str, Any]" = {} - if streamed_result and hasattr(streamed_result, "input"): - span_kwargs["original_input"] = streamed_result.input - - span = _maybe_start_agent_span( - context_wrapper, - agent, - should_run_agent_start_hooks, - span_kwargs, - is_streaming=True, - ) - - try: - result = await original_run_single_turn_streamed(*args, **kwargs) - except Exception as exc: - exc_info = sys.exc_info() - with capture_internal_exceptions(): - if span is not None and span.timestamp is None: - _record_exception_on_span(span, exc) - end_invoke_agent_span(context_wrapper, agent) - _close_streaming_workflow_span(agent) - reraise(*exc_info) - - return result - # Apply patches - agents.run.AgentRunner._run_single_turn_streamed = classmethod( - patched_run_single_turn_streamed - ) agents._run_impl.RunImpl.execute_handoffs = classmethod(patched_execute_handoffs) agents._run_impl.RunImpl.execute_final_output = classmethod( patched_execute_final_output From a753a6e5d15e93f2ed56a8a8028d36e310e5700f Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 10:47:03 +0100 Subject: [PATCH 10/16] fix(openai-agents): Patch execute_handoffs functions following library refactor --- .../integrations/openai_agents/__init__.py | 32 +++++++- .../openai_agents/patches/__init__.py | 7 +- .../openai_agents/patches/agent_run.py | 80 +++++++++---------- 3 files changed, 74 insertions(+), 45 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index d788f48fd2..6bf62c06b6 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -8,6 +8,7 @@ _get_all_tools, _run_single_turn, _run_single_turn_streamed, + _execute_handoffs, _create_run_wrapper, _create_run_streamed_wrapper, _patch_agent_run, @@ -32,10 +33,11 @@ try: # AgentRunner methods moved in v0.8 # https://github.com/openai/openai-agents-python/commit/3ce7c24d349b77bb750062b7e0e856d9ff48a5d5#diff-7470b3a5c5cbe2fcbb2703dc24f326f45a5819d853be2b1f395d122d278cd911 - from agents.run_internal import run_loop, turn_preparation + from agents.run_internal import run_loop, turn_preparation, turn_resolution except ImportError: run_loop = None turn_preparation = None + turn_resolution = None from typing import TYPE_CHECKING @@ -84,7 +86,7 @@ class OpenAIAgentsIntegration(Integration): Hosted MCP Tools are run as part of the Responses API call, and involve OpenAI reaching out to an external MCP server. An agent can handoff to another agent, also directed by the return value of the Responses API and run post-API call in the loop. Handoffs are a way to switch agent-wide configuration. - - Handoffs are executed by calling `RunImpl.execute_handoffs()`. 
The method is patched in `patched_execute_handoffs()` + - Handoffs are executed by calling `RunImpl.execute_handoffs()`. The method is patched with `patches._execute_handoffs()` """ identifier = "openai_agents" @@ -135,6 +137,20 @@ async def new_wrapped_run_single_turn_streamed( agents.run.run_single_turn_streamed = new_wrapped_run_single_turn_streamed + original_execute_handoffs = turn_resolution.execute_handoffs + + @wraps(original_execute_handoffs) + async def new_wrapped_execute_handoffs( + *args: "Any", **kwargs: "Any" + ) -> "Any": + return await _execute_handoffs( + original_execute_handoffs, *args, **kwargs + ) + + agents.run_internal.turn_resolution.execute_handoffs = ( + new_wrapped_execute_handoffs + ) + return original_get_all_tools = AgentRunner._get_all_tools @@ -184,3 +200,15 @@ async def old_wrapped_run_single_turn_streamed( agents.run.AgentRunner._run_single_turn_streamed = classmethod( old_wrapped_run_single_turn_streamed ) + + original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs + + @wraps(agents._run_impl.RunImpl.execute_handoffs.__func__) + async def old_wrapped_execute_handoffs( + cls: "agents.Runner", *args: "Any", **kwargs: "Any" + ) -> "Any": + return await _execute_handoffs(original_execute_handoffs, *args, **kwargs) + + agents._run_impl.RunImpl.execute_handoffs = classmethod( + old_wrapped_execute_handoffs + ) diff --git a/sentry_sdk/integrations/openai_agents/patches/__init__.py b/sentry_sdk/integrations/openai_agents/patches/__init__.py index 0c06d96cc4..ce2ba58d85 100644 --- a/sentry_sdk/integrations/openai_agents/patches/__init__.py +++ b/sentry_sdk/integrations/openai_agents/patches/__init__.py @@ -1,5 +1,10 @@ from .models import _get_model # noqa: F401 from .tools import _get_all_tools # noqa: F401 from .runner import _create_run_wrapper, _create_run_streamed_wrapper # noqa: F401 -from .agent_run import _run_single_turn, _run_single_turn_streamed, _patch_agent_run # noqa: F401 +from .agent_run import ( + _run_single_turn, + _run_single_turn_streamed, + _execute_handoffs, + _patch_agent_run, +) # noqa: F401 from .error_tracing import _patch_error_tracing # noqa: F401 diff --git a/sentry_sdk/integrations/openai_agents/patches/agent_run.py b/sentry_sdk/integrations/openai_agents/patches/agent_run.py index de3cf62f75..506e169337 100644 --- a/sentry_sdk/integrations/openai_agents/patches/agent_run.py +++ b/sentry_sdk/integrations/openai_agents/patches/agent_run.py @@ -166,55 +166,52 @@ async def _run_single_turn_streamed( return result -def _patch_agent_run() -> None: +async def _execute_handoffs( + original_execute_handoffs: "Callable[..., SingleStepResult]", + *args: "Any", + **kwargs: "Any", +) -> "SingleStepResult": """ - Patches AgentRunner methods to create agent invocation spans. - This directly patches the execution flow to track when agents start and stop. + Patched execute_handoffs that + - creates and manages handoff spans. + - ends the agent invocation span. + - ends the workflow span if the response is streamed and an exception is raised in `execute_handoffs()`. 
""" - # Store original methods - original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs - original_execute_final_output = agents._run_impl.RunImpl.execute_final_output + context_wrapper = kwargs.get("context_wrapper") + run_handoffs = kwargs.get("run_handoffs") + agent = kwargs.get("agent") - @wraps( - original_execute_handoffs.__func__ - if hasattr(original_execute_handoffs, "__func__") - else original_execute_handoffs - ) - async def patched_execute_handoffs( - cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": - """ - Patched execute_handoffs that - - creates and manages handoff spans. - - ends the agent invocation span. - - ends the workflow span if the response is streamed and an exception is raised in `execute_handoffs()`. - """ + # Create Sentry handoff span for the first handoff (agents library only processes the first one) + if run_handoffs: + first_handoff = run_handoffs[0] + handoff_agent_name = first_handoff.handoff.agent_name + handoff_span(context_wrapper, agent, handoff_agent_name) - context_wrapper = kwargs.get("context_wrapper") - run_handoffs = kwargs.get("run_handoffs") - agent = kwargs.get("agent") + # Call original method with all parameters + try: + result = await original_execute_handoffs(*args, **kwargs) + except Exception: + exc_info = sys.exc_info() + with capture_internal_exceptions(): + _close_streaming_workflow_span(agent) + reraise(*exc_info) + finally: + # End span for current agent after handoff processing is complete + if agent and context_wrapper and _has_active_agent_span(context_wrapper): + end_invoke_agent_span(context_wrapper, agent) - # Create Sentry handoff span for the first handoff (agents library only processes the first one) - if run_handoffs: - first_handoff = run_handoffs[0] - handoff_agent_name = first_handoff.handoff.agent_name - handoff_span(context_wrapper, agent, handoff_agent_name) + return result - # Call original method with all parameters - try: - result = await original_execute_handoffs(*args, **kwargs) - except Exception: - exc_info = sys.exc_info() - with capture_internal_exceptions(): - _close_streaming_workflow_span(agent) - reraise(*exc_info) - finally: - # End span for current agent after handoff processing is complete - if agent and context_wrapper and _has_active_agent_span(context_wrapper): - end_invoke_agent_span(context_wrapper, agent) - return result +def _patch_agent_run() -> None: + """ + Patches AgentRunner methods to create agent invocation spans. + This directly patches the execution flow to track when agents start and stop. + """ + + # Store original methods + original_execute_final_output = agents._run_impl.RunImpl.execute_final_output @wraps( original_execute_final_output.__func__ @@ -250,7 +247,6 @@ async def patched_execute_final_output( return result # Apply patches - agents._run_impl.RunImpl.execute_handoffs = classmethod(patched_execute_handoffs) agents._run_impl.RunImpl.execute_final_output = classmethod( patched_execute_final_output ) From 168e40580d3e59971855713e885b4ad5bf7f798e Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:20:34 +0100 Subject: [PATCH 11/16] . 
--- sentry_sdk/integrations/openai_agents/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 62a6da5d40..89e09169f2 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -130,7 +130,7 @@ async def old_wrapped_get_all_tools( @wraps(AgentRunner._get_model.__func__) def old_wrapped_get_model( cls: "agents.Runner", agent: "agents.Agent", run_config: "agents.RunConfig" - ) -> "list[agents.Tool]": + ) -> "list[agents.Model]": return _get_model(original_get_model, agent, run_config) agents.run.AgentRunner._get_model = classmethod(old_wrapped_get_model) From 85e8c3486eaa42c1ff5e5287ace05197449568a7 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:28:02 +0100 Subject: [PATCH 12/16] . --- sentry_sdk/integrations/openai_agents/__init__.py | 10 +++++++--- .../integrations/openai_agents/patches/agent_run.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index f12d728cbb..1dbc82870b 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -41,6 +41,8 @@ if TYPE_CHECKING: from typing import Any + from agents.run_internal.run_steps import SingleStepResult + def _patch_runner() -> None: # Create the root span for one full agent run (including eventual handoffs) @@ -119,7 +121,9 @@ def new_wrapped_get_model( agents.run_internal.run_loop.get_model = new_wrapped_get_model @wraps(run_loop.run_single_turn) - async def patched_run_single_turn(*args: "Any", **kwargs: "Any") -> "Any": + async def patched_run_single_turn( + *args: "Any", **kwargs: "Any" + ) -> "SingleStepResult": return await _run_single_turn(run_loop.run_single_turn, *args, **kwargs) agents.run.run_single_turn = patched_run_single_turn @@ -150,10 +154,10 @@ def old_wrapped_get_model( original_run_single_turn = AgentRunner._run_single_turn - @wraps(AgentRunner._run_single_turn) + @wraps(AgentRunner._run_single_turn.__func__) async def old_wrapped_run_single_turn( cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": + ) -> "SingleStepResult": return await _run_single_turn(original_run_single_turn, *args, **kwargs) agents.run.AgentRunner._run_single_turn = classmethod( diff --git a/sentry_sdk/integrations/openai_agents/patches/agent_run.py b/sentry_sdk/integrations/openai_agents/patches/agent_run.py index b7fe7fc236..ae879ad9df 100644 --- a/sentry_sdk/integrations/openai_agents/patches/agent_run.py +++ b/sentry_sdk/integrations/openai_agents/patches/agent_run.py @@ -84,7 +84,7 @@ async def _run_single_turn( original_run_single_turn: "Callable[..., Awaitable[SingleStepResult]]", *args: "Any", **kwargs: "Any", -) -> "Any": +) -> "SingleStepResult": """ Patched _run_single_turn that - creates agent invocation spans if there is no already active agent invocation span. From c96a268059067d5622b17e5d84f59b1685122d13 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:29:39 +0100 Subject: [PATCH 13/16] . 
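Point `@wraps` on the legacy streamed wrapper at the underlying function via `.__func__`, matching the previous patch's treatment of `_run_single_turn`, and annotate both streamed wrappers as returning `SingleStepResult` instead of `Any`.

All the legacy-path wrappers follow the same classmethod re-wrapping pattern. A self-contained sketch of that pattern (dummy class, no dependency on the agents library):

    import asyncio
    from functools import wraps

    class DummyRunner:
        @classmethod
        async def _run_single_turn(cls, agent):
            """Original docstring."""
            return f"ran {agent}"

    # Accessing the classmethod on the class yields a bound method, so the
    # original can be stored and awaited later without passing cls again.
    original_run_single_turn = DummyRunner._run_single_turn

    # `.__func__` is the plain function underneath the classmethod; @wraps
    # copies its __name__, __doc__ and other metadata onto the wrapper.
    @wraps(DummyRunner._run_single_turn.__func__)
    async def wrapped_run_single_turn(cls, *args, **kwargs):
        # span creation / instrumentation would go here
        return await original_run_single_turn(*args, **kwargs)

    # Re-install as a classmethod so callers still get cls bound for them.
    DummyRunner._run_single_turn = classmethod(wrapped_run_single_turn)

    assert DummyRunner._run_single_turn.__name__ == "_run_single_turn"
    assert asyncio.run(DummyRunner._run_single_turn("agent-1")) == "ran agent-1"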
--- sentry_sdk/integrations/openai_agents/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 836f640c6d..7838ca87a5 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -132,7 +132,7 @@ async def patched_run_single_turn( @wraps(run_loop.run_single_turn_streamed) async def new_wrapped_run_single_turn_streamed( *args: "Any", **kwargs: "Any" - ) -> "Any": + ) -> "SingleStepResult": return await _run_single_turn_streamed( run_loop.run_single_turn_streamed, *args, **kwargs ) @@ -177,10 +177,10 @@ async def old_wrapped_run_single_turn( original_run_single_turn_streamed = AgentRunner._run_single_turn_streamed - @wraps(AgentRunner._run_single_turn_streamed) + @wraps(AgentRunner._run_single_turn_streamed.__func__) async def old_wrapped_run_single_turn_streamed( cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": + ) -> "SingleStepResult": return await _run_single_turn_streamed( original_run_single_turn_streamed, *args, **kwargs ) From 9c9973a2e5f7e08ffda7e7d6dabfc4499f1fa766 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:30:48 +0100 Subject: [PATCH 14/16] . --- sentry_sdk/integrations/openai_agents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index e9d57e7b51..c643c567bd 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -146,7 +146,7 @@ async def new_wrapped_run_single_turn_streamed( @wraps(original_execute_handoffs) async def new_wrapped_execute_handoffs( *args: "Any", **kwargs: "Any" - ) -> "Any": + ) -> "SingleStepResult": return await _execute_handoffs( original_execute_handoffs, *args, **kwargs ) @@ -210,7 +210,7 @@ async def old_wrapped_run_single_turn_streamed( @wraps(agents._run_impl.RunImpl.execute_handoffs.__func__) async def old_wrapped_execute_handoffs( cls: "agents.Runner", *args: "Any", **kwargs: "Any" - ) -> "Any": + ) -> "SingleStepResult": return await _execute_handoffs(original_execute_handoffs, *args, **kwargs) agents._run_impl.RunImpl.execute_handoffs = classmethod( From 18cb7377dc35bf8f297ed548e31c7665d7770f23 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:36:45 +0100 Subject: [PATCH 15/16] . --- sentry_sdk/integrations/openai_agents/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 1dbc82870b..cf420859dd 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -121,12 +121,12 @@ def new_wrapped_get_model( agents.run_internal.run_loop.get_model = new_wrapped_get_model @wraps(run_loop.run_single_turn) - async def patched_run_single_turn( + async def new_wrapped_run_single_turn( *args: "Any", **kwargs: "Any" ) -> "SingleStepResult": return await _run_single_turn(run_loop.run_single_turn, *args, **kwargs) - agents.run.run_single_turn = patched_run_single_turn + agents.run.run_single_turn = new_wrapped_run_single_turn return From 48aff66d2293c7cc627a1e915321507f6bcfd06d Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 12 Feb 2026 11:56:27 +0100 Subject: [PATCH 16/16] . 
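Finish the annotation fix from earlier in this series: `AgentRunner._get_model` resolves a single model, so the legacy wrapper now returns `agents.Model` rather than `list[agents.Model]`.

Patches 11 through 16 are annotation, metadata and naming cleanups; the instrumented call flow is unchanged. An illustrative usage sketch (assumes the openai-agents library, an OpenAI API key and a Sentry DSN are configured; output and span details depend on the model response):

    import agents
    import sentry_sdk
    from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration

    sentry_sdk.init(
        dsn="...",  # placeholder DSN
        traces_sample_rate=1.0,
        integrations=[OpenAIAgentsIntegration()],
    )

    agent = agents.Agent(name="assistant", instructions="Answer briefly.")
    result = agents.Runner.run_sync(agent, "Say hello")
    print(result.final_output)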
--- sentry_sdk/integrations/openai_agents/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/openai_agents/__init__.py b/sentry_sdk/integrations/openai_agents/__init__.py index 89e09169f2..9b3a670c2c 100644 --- a/sentry_sdk/integrations/openai_agents/__init__.py +++ b/sentry_sdk/integrations/openai_agents/__init__.py @@ -130,7 +130,7 @@ async def old_wrapped_get_all_tools( @wraps(AgentRunner._get_model.__func__) def old_wrapped_get_model( cls: "agents.Runner", agent: "agents.Agent", run_config: "agents.RunConfig" - ) -> "list[agents.Model]": + ) -> "agents.Model": return _get_model(original_get_model, agent, run_config) agents.run.AgentRunner._get_model = classmethod(old_wrapped_get_model)